diff --git a/[refs] b/[refs] index be9b03908fae..85662abade28 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: f31c338675872875e24f124af0689131b0c72600 +refs/heads/master: 7f1711234e6a21c153e892758d9d82c333ab37ac diff --git a/trunk/Documentation/RCU/RTFP.txt b/trunk/Documentation/RCU/RTFP.txt index 39ad8f56783a..6221464d1a7e 100644 --- a/trunk/Documentation/RCU/RTFP.txt +++ b/trunk/Documentation/RCU/RTFP.txt @@ -9,8 +9,8 @@ The first thing resembling RCU was published in 1980, when Kung and Lehman [Kung80] recommended use of a garbage collector to defer destruction of nodes in a parallel binary search tree in order to simplify its implementation. This works well in environments that have garbage -collectors, but most production garbage collectors incur significant -overhead. +collectors, but current production garbage collectors incur significant +read-side overhead. In 1982, Manber and Ladner [Manber82,Manber84] recommended deferring destruction until all threads running at that time have terminated, again @@ -99,25 +99,16 @@ locking, reduces contention, reduces memory latency for readers, and parallelizes pipeline stalls and memory latency for writers. However, these techniques still impose significant read-side overhead in the form of memory barriers. Researchers at Sun worked along similar lines -in the same timeframe [HerlihyLM02]. These techniques can be thought -of as inside-out reference counts, where the count is represented by the -number of hazard pointers referencing a given data structure (rather than -the more conventional counter field within the data structure itself). - -By the same token, RCU can be thought of as a "bulk reference count", -where some form of reference counter covers all reference by a given CPU -or thread during a set timeframe. This timeframe is related to, but -not necessarily exactly the same as, an RCU grace period. In classic -RCU, the reference counter is the per-CPU bit in the "bitmask" field, -and each such bit covers all references that might have been made by -the corresponding CPU during the prior grace period. Of course, RCU -can be thought of in other terms as well. +in the same timeframe [HerlihyLM02,HerlihyLMS03]. These techniques +can be thought of as inside-out reference counts, where the count is +represented by the number of hazard pointers referencing a given data +structure (rather than the more conventional counter field within the +data structure itself). In 2003, the K42 group described how RCU could be used to create -hot-pluggable implementations of operating-system functions [Appavoo03a]. -Later that year saw a paper describing an RCU implementation of System -V IPC [Arcangeli03], and an introduction to RCU in Linux Journal -[McKenney03a]. +hot-pluggable implementations of operating-system functions. Later that +year saw a paper describing an RCU implementation of System V IPC +[Arcangeli03], and an introduction to RCU in Linux Journal [McKenney03a]. 2004 has seen a Linux-Journal article on use of RCU in dcache [McKenney04a], a performance comparison of locking to RCU on several @@ -126,19 +117,10 @@ number of operating-system kernels [PaulEdwardMcKenneyPhD], a paper describing how to make RCU safe for soft-realtime applications [Sarma04c], and a paper describing SELinux performance with RCU [JamesMorris04b]. 
-2005 brought further adaptation of RCU to realtime use, permitting +2005 has seen further adaptation of RCU to realtime use, permitting preemption of RCU realtime critical sections [PaulMcKenney05a, PaulMcKenney05b]. -2006 saw the first best-paper award for an RCU paper [ThomasEHart2006a], -as well as further work on efficient implementations of preemptible -RCU [PaulEMcKenney2006b], but priority-boosting of RCU read-side critical -sections proved elusive. An RCU implementation permitting general -blocking in read-side critical sections appeared [PaulEMcKenney2006c], -Robert Olsson described an RCU-protected trie-hash combination -[RobertOlsson2006a]. - - Bibtex Entries @article{Kung80 @@ -221,41 +203,6 @@ Bibtex Entries ,Address="New Orleans, LA" } -@conference{Pu95a, -Author = "Calton Pu and Tito Autrey and Andrew Black and Charles Consel and -Crispin Cowan and Jon Inouye and Lakshmi Kethana and Jonathan Walpole and -Ke Zhang", -Title = "Optimistic Incremental Specialization: Streamlining a Commercial -Operating System", -Booktitle = "15\textsuperscript{th} ACM Symposium on -Operating Systems Principles (SOSP'95)", -address = "Copper Mountain, CO", -month="December", -year="1995", -pages="314-321", -annotation=" - Uses a replugger, but with a flag to signal when people are - using the resource at hand. Only one reader at a time. -" -} - -@conference{Cowan96a, -Author = "Crispin Cowan and Tito Autrey and Charles Krasic and -Calton Pu and Jonathan Walpole", -Title = "Fast Concurrent Dynamic Linking for an Adaptive Operating System", -Booktitle = "International Conference on Configurable Distributed Systems -(ICCDS'96)", -address = "Annapolis, MD", -month="May", -year="1996", -pages="108", -isbn="0-8186-7395-8", -annotation=" - Uses a replugger, but with a counter to signal when people are - using the resource at hand. Allows multiple readers. -" -} - @techreport{Slingwine95 ,author="John D. Slingwine and Paul E. McKenney" ,title="Apparatus and Method for Achieving Reduced Overhead Mutual @@ -365,49 +312,6 @@ Andrea Arcangeli and Andi Kleen and Orran Krieger and Rusty Russell" [Viewed June 23, 2004]" } -@conference{Michael02a -,author="Maged M. Michael" -,title="Safe Memory Reclamation for Dynamic Lock-Free Objects Using Atomic -Reads and Writes" -,Year="2002" -,Month="August" -,booktitle="{Proceedings of the 21\textsuperscript{st} Annual ACM -Symposium on Principles of Distributed Computing}" -,pages="21-30" -,annotation=" - Each thread keeps an array of pointers to items that it is - currently referencing. Sort of an inside-out garbage collection - mechanism, but one that requires the accessing code to explicitly - state its needs. Also requires read-side memory barriers on - most architectures. -" -} - -@conference{Michael02b -,author="Maged M. Michael" -,title="High Performance Dynamic Lock-Free Hash Tables and List-Based Sets" -,Year="2002" -,Month="August" -,booktitle="{Proceedings of the 14\textsuperscript{th} Annual ACM -Symposium on Parallel -Algorithms and Architecture}" -,pages="73-82" -,annotation=" - Like the title says... -" -} - -@InProceedings{HerlihyLM02 -,author={Maurice Herlihy and Victor Luchangco and Mark Moir} -,title="The Repeat Offender Problem: A Mechanism for Supporting Dynamic-Sized, -Lock-Free Data Structures" -,booktitle={Proceedings of 16\textsuperscript{th} International -Symposium on Distributed Computing} -,year=2002 -,month="October" -,pages="339-353" -} - @article{Appavoo03a ,author="J. Appavoo and K. Hui and C. A. N. Soules and R. W. Wisniewski and D. M. 
{Da Silva} and O. Krieger and M. A. Auslander and D. J. Edelsohn and @@ -543,95 +447,3 @@ Oregon Health and Sciences University" Realtime turns into making RCU yet more realtime friendly. " } - -@conference{ThomasEHart2006a -,Author="Thomas E. Hart and Paul E. McKenney and Angela Demke Brown" -,Title="Making Lockless Synchronization Fast: Performance Implications -of Memory Reclamation" -,Booktitle="20\textsuperscript{th} {IEEE} International Parallel and -Distributed Processing Symposium" -,month="April" -,year="2006" -,day="25-29" -,address="Rhodes, Greece" -,annotation=" - Compares QSBR (AKA "classic RCU"), HPBR, EBR, and lock-free - reference counting. -" -} - -@Conference{PaulEMcKenney2006b -,Author="Paul E. McKenney and Dipankar Sarma and Ingo Molnar and -Suparna Bhattacharya" -,Title="Extending RCU for Realtime and Embedded Workloads" -,Booktitle="{Ottawa Linux Symposium}" -,Month="July" -,Year="2006" -,pages="v2 123-138" -,note="Available: -\url{http://www.linuxsymposium.org/2006/view_abstract.php?content_key=184} -\url{http://www.rdrop.com/users/paulmck/RCU/OLSrtRCU.2006.08.11a.pdf} -[Viewed January 1, 2007]" -,annotation=" - Described how to improve the -rt implementation of realtime RCU. -" -} - -@unpublished{PaulEMcKenney2006c -,Author="Paul E. McKenney" -,Title="Sleepable {RCU}" -,month="October" -,day="9" -,year="2006" -,note="Available: -\url{http://lwn.net/Articles/202847/} -Revised: -\url{http://www.rdrop.com/users/paulmck/RCU/srcu.2007.01.14a.pdf} -[Viewed August 21, 2006]" -,annotation=" - LWN article introducing SRCU. -" -} - -@unpublished{RobertOlsson2006a -,Author="Robert Olsson and Stefan Nilsson" -,Title="{TRASH}: A dynamic {LC}-trie and hash data structure" -,month="August" -,day="18" -,year="2006" -,note="Available: -\url{http://www.nada.kth.se/~snilsson/public/papers/trash/trash.pdf} -[Viewed February 24, 2007]" -,annotation=" - RCU-protected dynamic trie-hash combination. -" -} - -@unpublished{ThomasEHart2007a -,Author="Thomas E. Hart and Paul E. McKenney and Angela Demke Brown and Jonathan Walpole" -,Title="Performance of memory reclamation for lockless synchronization" -,journal="J. Parallel Distrib. Comput." -,year="2007" -,note="To appear in J. Parallel Distrib. Comput. - \url{doi=10.1016/j.jpdc.2007.04.010}" -,annotation={ - Compares QSBR (AKA "classic RCU"), HPBR, EBR, and lock-free - reference counting. Journal version of ThomasEHart2006a. -} -} - -@unpublished{PaulEMcKenney2007QRCUspin -,Author="Paul E. McKenney" -,Title="Using Promela and Spin to verify parallel algorithms" -,month="August" -,day="1" -,year="2007" -,note="Available: -\url{http://lwn.net/Articles/243851/} -[Viewed September 8, 2007]" -,annotation=" - LWN article describing Promela and spin, and also using Oleg - Nesterov's QRCU as an example (with Paul McKenney's fastpath). -" -} - diff --git a/trunk/Documentation/RCU/rcu.txt b/trunk/Documentation/RCU/rcu.txt index 95821a29ae41..f84407cba816 100644 --- a/trunk/Documentation/RCU/rcu.txt +++ b/trunk/Documentation/RCU/rcu.txt @@ -36,14 +36,6 @@ o How can the updater tell when a grace period has completed executed in user mode, or executed in the idle loop, we can safely free up that item. - Preemptible variants of RCU (CONFIG_PREEMPT_RCU) get the - same effect, but require that the readers manipulate CPU-local - counters. These counters allow limited types of blocking - within RCU read-side critical sections. SRCU also uses - CPU-local counters, and permits general blocking within - RCU read-side critical sections. 
These two variants of - RCU detect grace periods by sampling these counters. - o If I am running on a uniprocessor kernel, which can only do one thing at a time, why should I wait for a grace period? @@ -54,10 +46,7 @@ o How can I see where RCU is currently used in the Linux kernel? Search for "rcu_read_lock", "rcu_read_unlock", "call_rcu", "rcu_read_lock_bh", "rcu_read_unlock_bh", "call_rcu_bh", "srcu_read_lock", "srcu_read_unlock", "synchronize_rcu", - "synchronize_net", "synchronize_srcu", and the other RCU - primitives. Or grab one of the cscope databases from: - - http://www.rdrop.com/users/paulmck/RCU/linuxusage/rculocktab.html + "synchronize_net", and "synchronize_srcu". o What guidelines should I follow when writing code that uses RCU? @@ -78,11 +67,7 @@ o I hear that RCU is patented? What is with that? o I hear that RCU needs work in order to support realtime kernels? - This work is largely completed. Realtime-friendly RCU can be - enabled via the CONFIG_PREEMPT_RCU kernel configuration parameter. - However, work is in progress for enabling priority boosting of - preempted RCU read-side critical sections.This is needed if you - have CPU-bound realtime threads. + Yes, work in progress. o Where can I find more information on RCU? diff --git a/trunk/Documentation/RCU/torture.txt b/trunk/Documentation/RCU/torture.txt index 2967a65269d8..25a3c3f7d378 100644 --- a/trunk/Documentation/RCU/torture.txt +++ b/trunk/Documentation/RCU/torture.txt @@ -46,13 +46,12 @@ stat_interval The number of seconds between output of torture shuffle_interval The number of seconds to keep the test threads affinitied - to a particular subset of the CPUs, defaults to 5 seconds. - Used in conjunction with test_no_idle_hz. + to a particular subset of the CPUs. Used in conjunction + with test_no_idle_hz. test_no_idle_hz Whether or not to test the ability of RCU to operate in a kernel that disables the scheduling-clock interrupt to idle CPUs. Boolean parameter, "1" to test, "0" otherwise. - Defaults to omitting this test. torture_type The type of RCU to test: "rcu" for the rcu_read_lock() API, "rcu_sync" for rcu_read_lock() with synchronous reclamation, @@ -83,6 +82,8 @@ be evident. ;-) The entries are as follows: +o "ggp": The number of counter flips (or batches) since boot. + o "rtc": The hexadecimal address of the structure currently visible to readers. @@ -116,8 +117,8 @@ o "Reader Pipe": Histogram of "ages" of structures seen by readers. o "Reader Batch": Another histogram of "ages" of structures seen by readers, but in terms of counter flips (or batches) rather than in terms of grace periods. The legal number of non-zero - entries is again two. The reason for this separate view is that - it is sometimes easier to get the third entry to show up in the + entries is again two. The reason for this separate view is + that it is easier to get the third entry to show up in the "Reader Batch" list than in the "Reader Pipe" list. o "Free-Block Circulation": Shows the number of torture structures diff --git a/trunk/Documentation/cpu-hotplug.txt b/trunk/Documentation/cpu-hotplug.txt index fb94f5a71b68..a741f658a3c9 100644 --- a/trunk/Documentation/cpu-hotplug.txt +++ b/trunk/Documentation/cpu-hotplug.txt @@ -109,13 +109,12 @@ Never use anything other than cpumask_t to represent bitmap of CPUs. for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask. 
#include - get_online_cpus() and put_online_cpus(): + lock_cpu_hotplug() and unlock_cpu_hotplug(): -The above calls are used to inhibit cpu hotplug operations. While the -cpu_hotplug.refcount is non zero, the cpu_online_map will not change. -If you merely need to avoid cpus going away, you could also use -preempt_disable() and preempt_enable() for those sections. -Just remember the critical section cannot call any +The above calls are used to inhibit cpu hotplug operations. While holding the +cpucontrol mutex, cpu_online_map will not change. If you merely need to avoid +cpus going away, you could also use preempt_disable() and preempt_enable() +for those sections. Just remember the critical section cannot call any function that can sleep or schedule this process away. The preempt_disable() will work as long as stop_machine_run() is used to take a cpu down. diff --git a/trunk/arch/arm/kernel/time.c b/trunk/arch/arm/kernel/time.c index e59b5b84168d..f6f3689a86ee 100644 --- a/trunk/arch/arm/kernel/time.c +++ b/trunk/arch/arm/kernel/time.c @@ -79,6 +79,17 @@ static unsigned long dummy_gettimeoffset(void) } #endif +/* + * An implementation of printk_clock() independent from + * sched_clock(). This avoids non-bootable kernels when + * printk_clock is enabled. + */ +unsigned long long printk_clock(void) +{ + return (unsigned long long)(jiffies - INITIAL_JIFFIES) * + (1000000000 / HZ); +} + static unsigned long next_rtc_update; /* diff --git a/trunk/arch/ia64/kernel/setup.c b/trunk/arch/ia64/kernel/setup.c index 86028c69861e..4ac2b1f1bd3b 100644 --- a/trunk/arch/ia64/kernel/setup.c +++ b/trunk/arch/ia64/kernel/setup.c @@ -71,6 +71,8 @@ unsigned long __per_cpu_offset[NR_CPUS]; EXPORT_SYMBOL(__per_cpu_offset); #endif +extern void ia64_setup_printk_clock(void); + DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info); DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); unsigned long ia64_cycles_per_usec; @@ -505,6 +507,8 @@ setup_arch (char **cmdline_p) /* process SAL system table: */ ia64_sal_init(__va(efi.sal_systab)); + ia64_setup_printk_clock(); + #ifdef CONFIG_SMP cpu_physical_id(0) = hard_smp_processor_id(); #endif diff --git a/trunk/arch/ia64/kernel/time.c b/trunk/arch/ia64/kernel/time.c index 3ab042720970..2bb84214e5f1 100644 --- a/trunk/arch/ia64/kernel/time.c +++ b/trunk/arch/ia64/kernel/time.c @@ -344,6 +344,33 @@ udelay (unsigned long usecs) } EXPORT_SYMBOL(udelay); +static unsigned long long ia64_itc_printk_clock(void) +{ + if (ia64_get_kr(IA64_KR_PER_CPU_DATA)) + return sched_clock(); + return 0; +} + +static unsigned long long ia64_default_printk_clock(void) +{ + return (unsigned long long)(jiffies_64 - INITIAL_JIFFIES) * + (1000000000/HZ); +} + +unsigned long long (*ia64_printk_clock)(void) = &ia64_default_printk_clock; + +unsigned long long printk_clock(void) +{ + return ia64_printk_clock(); +} + +void __init +ia64_setup_printk_clock(void) +{ + if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) + ia64_printk_clock = ia64_itc_printk_clock; +} + /* IA64 doesn't cache the timezone */ void update_vsyscall_tz(void) { diff --git a/trunk/arch/ia64/sn/kernel/setup.c b/trunk/arch/ia64/sn/kernel/setup.c index bb1d24929640..1f38a3a68390 100644 --- a/trunk/arch/ia64/sn/kernel/setup.c +++ b/trunk/arch/ia64/sn/kernel/setup.c @@ -64,6 +64,7 @@ extern void sn_timer_init(void); extern unsigned long last_time_offset; extern void (*ia64_mark_idle) (int); extern void snidle(int); +extern unsigned long long (*ia64_printk_clock)(void); unsigned long sn_rtc_cycles_per_second; 
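The cpu-hotplug.txt hunk above documents the lock_cpu_hotplug()/unlock_cpu_hotplug() pair, and the later mips, powerpc and x86 hunks (sched affinity, rtasd, MTRR, microcode) all follow the same usage pattern. Below is a minimal sketch of that pattern, assuming the <linux/cpu.h> interface of this kernel generation; the visit() callback is a hypothetical placeholder, and this is an illustration of the documented rule rather than code from the patch.

	/*
	 * Keep cpu_online_map stable across a traversal, as described in
	 * the cpu-hotplug.txt hunk above.  lock_cpu_hotplug() takes the
	 * cpucontrol mutex, so the caller must be able to sleep; if the
	 * section only needs CPUs not to disappear and never sleeps,
	 * preempt_disable()/preempt_enable() suffices as long as
	 * stop_machine_run() is what takes a CPU down.
	 */
	#include <linux/cpu.h>
	#include <linux/cpumask.h>

	static void visit_online_cpus(void (*visit)(int cpu))	/* hypothetical callback */
	{
		int cpu;

		lock_cpu_hotplug();
		for_each_online_cpu(cpu)
			visit(cpu);	/* cpu stays online until the unlock below */
		unlock_cpu_hotplug();
	}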
EXPORT_SYMBOL(sn_rtc_cycles_per_second); @@ -359,6 +360,14 @@ sn_scan_pcdp(void) static unsigned long sn2_rtc_initial; +static unsigned long long ia64_sn2_printk_clock(void) +{ + unsigned long rtc_now = rtc_time(); + + return (rtc_now - sn2_rtc_initial) * + (1000000000 / sn_rtc_cycles_per_second); +} + /** * sn_setup - SN platform setup routine * @cmdline_p: kernel command line @@ -459,6 +468,8 @@ void __init sn_setup(char **cmdline_p) platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR; + ia64_printk_clock = ia64_sn2_printk_clock; + printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF); /* diff --git a/trunk/arch/mips/kernel/mips-mt-fpaff.c b/trunk/arch/mips/kernel/mips-mt-fpaff.c index bb4f00c0cbe9..892665bb12b1 100644 --- a/trunk/arch/mips/kernel/mips-mt-fpaff.c +++ b/trunk/arch/mips/kernel/mips-mt-fpaff.c @@ -58,13 +58,13 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask))) return -EFAULT; - get_online_cpus(); + lock_cpu_hotplug(); read_lock(&tasklist_lock); p = find_process_by_pid(pid); if (!p) { read_unlock(&tasklist_lock); - put_online_cpus(); + unlock_cpu_hotplug(); return -ESRCH; } @@ -106,7 +106,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, out_unlock: put_task_struct(p); - put_online_cpus(); + unlock_cpu_hotplug(); return retval; } @@ -125,7 +125,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, if (len < real_len) return -EINVAL; - get_online_cpus(); + lock_cpu_hotplug(); read_lock(&tasklist_lock); retval = -ESRCH; @@ -140,7 +140,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, out_unlock: read_unlock(&tasklist_lock); - put_online_cpus(); + unlock_cpu_hotplug(); if (retval) return retval; if (copy_to_user(user_mask_ptr, &mask, real_len)) diff --git a/trunk/arch/powerpc/platforms/pseries/hotplug-cpu.c b/trunk/arch/powerpc/platforms/pseries/hotplug-cpu.c index c4ad54e0f288..412e6b42986f 100644 --- a/trunk/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/trunk/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -153,7 +153,7 @@ static int pseries_add_processor(struct device_node *np) for (i = 0; i < nthreads; i++) cpu_set(i, tmp); - cpu_maps_update_begin(); + lock_cpu_hotplug(); BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map)); @@ -190,7 +190,7 @@ static int pseries_add_processor(struct device_node *np) } err = 0; out_unlock: - cpu_maps_update_done(); + unlock_cpu_hotplug(); return err; } @@ -211,7 +211,7 @@ static void pseries_remove_processor(struct device_node *np) nthreads = len / sizeof(u32); - cpu_maps_update_begin(); + lock_cpu_hotplug(); for (i = 0; i < nthreads; i++) { for_each_present_cpu(cpu) { if (get_hard_smp_processor_id(cpu) != intserv[i]) @@ -225,7 +225,7 @@ static void pseries_remove_processor(struct device_node *np) printk(KERN_WARNING "Could not find cpu to remove " "with physical id 0x%x\n", intserv[i]); } - cpu_maps_update_done(); + unlock_cpu_hotplug(); } static int pseries_smp_notifier(struct notifier_block *nb, diff --git a/trunk/arch/powerpc/platforms/pseries/rtasd.c b/trunk/arch/powerpc/platforms/pseries/rtasd.c index e3078ce41518..73401c820110 100644 --- a/trunk/arch/powerpc/platforms/pseries/rtasd.c +++ b/trunk/arch/powerpc/platforms/pseries/rtasd.c @@ -382,7 +382,7 @@ static void do_event_scan_all_cpus(long delay) { int cpu; - get_online_cpus(); + lock_cpu_hotplug(); cpu = first_cpu(cpu_online_map); for (;;) { set_cpus_allowed(current, 
cpumask_of_cpu(cpu)); @@ -390,15 +390,15 @@ static void do_event_scan_all_cpus(long delay) set_cpus_allowed(current, CPU_MASK_ALL); /* Drop hotplug lock, and sleep for the specified delay */ - put_online_cpus(); + unlock_cpu_hotplug(); msleep_interruptible(delay); - get_online_cpus(); + lock_cpu_hotplug(); cpu = next_cpu(cpu, cpu_online_map); if (cpu == NR_CPUS) break; } - put_online_cpus(); + unlock_cpu_hotplug(); } static int rtasd(void *unused) diff --git a/trunk/arch/x86/kernel/cpu/mtrr/main.c b/trunk/arch/x86/kernel/cpu/mtrr/main.c index beb45c9c0835..3b20613325dc 100644 --- a/trunk/arch/x86/kernel/cpu/mtrr/main.c +++ b/trunk/arch/x86/kernel/cpu/mtrr/main.c @@ -349,7 +349,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, replace = -1; /* No CPU hotplug when we change MTRR entries */ - get_online_cpus(); + lock_cpu_hotplug(); /* Search for existing MTRR */ mutex_lock(&mtrr_mutex); for (i = 0; i < num_var_ranges; ++i) { @@ -405,7 +405,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, error = i; out: mutex_unlock(&mtrr_mutex); - put_online_cpus(); + unlock_cpu_hotplug(); return error; } @@ -495,7 +495,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) max = num_var_ranges; /* No CPU hotplug when we change MTRR entries */ - get_online_cpus(); + lock_cpu_hotplug(); mutex_lock(&mtrr_mutex); if (reg < 0) { /* Search for existing MTRR */ @@ -536,7 +536,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) error = reg; out: mutex_unlock(&mtrr_mutex); - put_online_cpus(); + unlock_cpu_hotplug(); return error; } /** diff --git a/trunk/arch/x86/kernel/entry_64.S b/trunk/arch/x86/kernel/entry_64.S index e70f3881d7e4..3a058bb16409 100644 --- a/trunk/arch/x86/kernel/entry_64.S +++ b/trunk/arch/x86/kernel/entry_64.S @@ -283,7 +283,7 @@ sysret_careful: sysret_signal: TRACE_IRQS_ON sti - testl $_TIF_DO_NOTIFY_MASK,%edx + testl $(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx jz 1f /* Really a signal */ @@ -377,7 +377,7 @@ int_very_careful: jmp int_restore_rest int_signal: - testl $_TIF_DO_NOTIFY_MASK,%edx + testl $(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx jz 1f movq %rsp,%rdi # &ptregs -> arg1 xorl %esi,%esi # oldset -> arg2 @@ -603,7 +603,7 @@ retint_careful: jmp retint_check retint_signal: - testl $_TIF_DO_NOTIFY_MASK,%edx + testl $(_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY),%edx jz retint_swapgs TRACE_IRQS_ON sti diff --git a/trunk/arch/x86/kernel/microcode.c b/trunk/arch/x86/kernel/microcode.c index 40cfd5488719..09c315214a5e 100644 --- a/trunk/arch/x86/kernel/microcode.c +++ b/trunk/arch/x86/kernel/microcode.c @@ -436,7 +436,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_ return -EINVAL; } - get_online_cpus(); + lock_cpu_hotplug(); mutex_lock(µcode_mutex); user_buffer = (void __user *) buf; @@ -447,7 +447,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_ ret = (ssize_t)len; mutex_unlock(µcode_mutex); - put_online_cpus(); + unlock_cpu_hotplug(); return ret; } @@ -658,14 +658,14 @@ static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz) old = current->cpus_allowed; - get_online_cpus(); + lock_cpu_hotplug(); set_cpus_allowed(current, cpumask_of_cpu(cpu)); mutex_lock(µcode_mutex); if (uci->valid) err = cpu_request_microcode(cpu); mutex_unlock(µcode_mutex); - put_online_cpus(); + unlock_cpu_hotplug(); set_cpus_allowed(current, old); } if (err) @@ -817,9 +817,9 @@ static int __init microcode_init (void) return 
PTR_ERR(microcode_pdev); } - get_online_cpus(); + lock_cpu_hotplug(); error = sysdev_driver_register(&cpu_sysdev_class, &mc_sysdev_driver); - put_online_cpus(); + unlock_cpu_hotplug(); if (error) { microcode_dev_exit(); platform_device_unregister(microcode_pdev); @@ -839,9 +839,9 @@ static void __exit microcode_exit (void) unregister_hotcpu_notifier(&mc_cpu_notifier); - get_online_cpus(); + lock_cpu_hotplug(); sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver); - put_online_cpus(); + unlock_cpu_hotplug(); platform_device_unregister(microcode_pdev); } diff --git a/trunk/arch/x86/kernel/signal_32.c b/trunk/arch/x86/kernel/signal_32.c index 20f29e4c1d33..9bdd83022f5f 100644 --- a/trunk/arch/x86/kernel/signal_32.c +++ b/trunk/arch/x86/kernel/signal_32.c @@ -658,9 +658,6 @@ void do_notify_resume(struct pt_regs *regs, void *_unused, /* deal with pending signal delivery */ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) do_signal(regs); - - if (thread_info_flags & _TIF_HRTICK_RESCHED) - hrtick_resched(); clear_thread_flag(TIF_IRET); } diff --git a/trunk/arch/x86/kernel/signal_64.c b/trunk/arch/x86/kernel/signal_64.c index 38d806467c0f..ab086b0357fc 100644 --- a/trunk/arch/x86/kernel/signal_64.c +++ b/trunk/arch/x86/kernel/signal_64.c @@ -480,9 +480,6 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) /* deal with pending signal delivery */ if (thread_info_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK)) do_signal(regs); - - if (thread_info_flags & _TIF_HRTICK_RESCHED) - hrtick_resched(); } void signal_fault(struct pt_regs *regs, void __user *frame, char *where) diff --git a/trunk/arch/x86/kernel/stacktrace.c b/trunk/arch/x86/kernel/stacktrace.c index 55771fd7e545..6fa6cf036c70 100644 --- a/trunk/arch/x86/kernel/stacktrace.c +++ b/trunk/arch/x86/kernel/stacktrace.c @@ -33,19 +33,6 @@ static void save_stack_address(void *data, unsigned long addr) trace->entries[trace->nr_entries++] = addr; } -static void save_stack_address_nosched(void *data, unsigned long addr) -{ - struct stack_trace *trace = (struct stack_trace *)data; - if (in_sched_functions(addr)) - return; - if (trace->skip > 0) { - trace->skip--; - return; - } - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = addr; -} - static const struct stacktrace_ops save_stack_ops = { .warning = save_stack_warning, .warning_symbol = save_stack_warning_symbol, @@ -53,13 +40,6 @@ static const struct stacktrace_ops save_stack_ops = { .address = save_stack_address, }; -static const struct stacktrace_ops save_stack_ops_nosched = { - .warning = save_stack_warning, - .warning_symbol = save_stack_warning_symbol, - .stack = save_stack_stack, - .address = save_stack_address_nosched, -}; - /* * Save stack-backtrace addresses into a stack_trace buffer. 
*/ @@ -70,10 +50,3 @@ void save_stack_trace(struct stack_trace *trace) trace->entries[trace->nr_entries++] = ULONG_MAX; } EXPORT_SYMBOL(save_stack_trace); - -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) -{ - dump_trace(tsk, NULL, NULL, &save_stack_ops_nosched, trace); - if (trace->nr_entries < trace->max_entries) - trace->entries[trace->nr_entries++] = ULONG_MAX; -} diff --git a/trunk/drivers/ide/Kconfig b/trunk/drivers/ide/Kconfig index ee01e273a537..fb06555708a8 100644 --- a/trunk/drivers/ide/Kconfig +++ b/trunk/drivers/ide/Kconfig @@ -374,6 +374,17 @@ comment "PCI IDE chipsets support" config BLK_DEV_IDEPCI bool +config IDEPCI_SHARE_IRQ + bool "Sharing PCI IDE interrupts support" + depends on BLK_DEV_IDEPCI + help + Some ATA/IDE chipsets have hardware support which allows for + sharing a single IRQ with other cards. To enable support for + this in the ATA/IDE driver, say Y here. + + It is safe to say Y to this question, in most cases. + If unsure, say N. + config IDEPCI_PCIBUS_ORDER def_bool BLK_DEV_IDE=y && BLK_DEV_IDEPCI @@ -696,6 +707,7 @@ config BLK_DEV_SVWKS config BLK_DEV_SGIIOC4 tristate "Silicon Graphics IOC4 chipset ATA/ATAPI support" depends on (IA64_SGI_SN2 || IA64_GENERIC) && SGI_IOC4 + select IDEPCI_SHARE_IRQ select BLK_DEV_IDEDMA_PCI help This driver adds PIO & MultiMode DMA-2 support for the SGI IOC4 diff --git a/trunk/drivers/ide/arm/icside.c b/trunk/drivers/ide/arm/icside.c index 673402f4a295..93f71fcfc04d 100644 --- a/trunk/drivers/ide/arm/icside.c +++ b/trunk/drivers/ide/arm/icside.c @@ -272,6 +272,8 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode) case XFER_SW_DMA_0: cycle_time = 480; break; + default: + return; } /* diff --git a/trunk/drivers/ide/cris/ide-cris.c b/trunk/drivers/ide/cris/ide-cris.c index 325e608d9e62..476e0d65ed43 100644 --- a/trunk/drivers/ide/cris/ide-cris.c +++ b/trunk/drivers/ide/cris/ide-cris.c @@ -747,6 +747,8 @@ static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed) strobe = ATA_DMA2_STROBE; hold = ATA_DMA2_HOLD; break; + default: + return; } if (speed >= XFER_UDMA_0) diff --git a/trunk/drivers/ide/ide-acpi.c b/trunk/drivers/ide/ide-acpi.c index e0bb0cfa7bdd..899d56536e80 100644 --- a/trunk/drivers/ide/ide-acpi.c +++ b/trunk/drivers/ide/ide-acpi.c @@ -383,19 +383,27 @@ static int taskfile_load_raw(ide_drive_t *drive, gtf->tfa[3], gtf->tfa[4], gtf->tfa[5], gtf->tfa[6]); memset(&args, 0, sizeof(ide_task_t)); + args.command_type = IDE_DRIVE_TASK_NO_DATA; + args.data_phase = TASKFILE_NO_DATA; + args.handler = &task_no_data_intr; /* convert gtf to IDE Taskfile */ - memcpy(&args.tf_array[7], >f->tfa, 7); - args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; + args.tfRegister[1] = gtf->tfa[0]; /* 0x1f1 */ + args.tfRegister[2] = gtf->tfa[1]; /* 0x1f2 */ + args.tfRegister[3] = gtf->tfa[2]; /* 0x1f3 */ + args.tfRegister[4] = gtf->tfa[3]; /* 0x1f4 */ + args.tfRegister[5] = gtf->tfa[4]; /* 0x1f5 */ + args.tfRegister[6] = gtf->tfa[5]; /* 0x1f6 */ + args.tfRegister[7] = gtf->tfa[6]; /* 0x1f7 */ if (ide_noacpitfs) { DEBPRINT("_GTF execution disabled\n"); return err; } - err = ide_no_data_taskfile(drive, &args); + err = ide_raw_taskfile(drive, &args, NULL); if (err) - printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n", + printk(KERN_ERR "%s: ide_raw_taskfile failed: %u\n", __FUNCTION__, err); return err; diff --git a/trunk/drivers/ide/ide-cd.c b/trunk/drivers/ide/ide-cd.c index 44b033ec0ab0..c7d77f0ad892 100644 --- a/trunk/drivers/ide/ide-cd.c +++ b/trunk/drivers/ide/ide-cd.c @@ 
-917,13 +917,19 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive, if (ide_wait_stat(&startstop, drive, 0, BUSY_STAT, WAIT_READY)) return startstop; - /* FIXME: for Virtual DMA we must check harder */ if (info->dma) info->dma = !hwif->dma_setup(drive); /* Set up the controller registers. */ - ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL | - IDE_TFLAG_NO_SELECT_MASK, xferlen, info->dma); + /* FIXME: for Virtual DMA we must check harder */ + HWIF(drive)->OUTB(info->dma, IDE_FEATURE_REG); + HWIF(drive)->OUTB(0, IDE_IREASON_REG); + HWIF(drive)->OUTB(0, IDE_SECTOR_REG); + + HWIF(drive)->OUTB(xferlen & 0xff, IDE_BCOUNTL_REG); + HWIF(drive)->OUTB(xferlen >> 8 , IDE_BCOUNTH_REG); + if (IDE_CONTROL_REG) + HWIF(drive)->OUTB(drive->ctl, IDE_CONTROL_REG); if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) { /* waiting for CDB interrupt, not DMA yet. */ diff --git a/trunk/drivers/ide/ide-disk.c b/trunk/drivers/ide/ide-disk.c index d8fdd865dea9..b1781908e1f2 100644 --- a/trunk/drivers/ide/ide-disk.c +++ b/trunk/drivers/ide/ide-disk.c @@ -129,50 +129,6 @@ static int lba_capacity_is_ok (struct hd_driveid *id) return 0; /* lba_capacity value may be bad */ } -static const u8 ide_rw_cmds[] = { - WIN_MULTREAD, - WIN_MULTWRITE, - WIN_MULTREAD_EXT, - WIN_MULTWRITE_EXT, - WIN_READ, - WIN_WRITE, - WIN_READ_EXT, - WIN_WRITE_EXT, - WIN_READDMA, - WIN_WRITEDMA, - WIN_READDMA_EXT, - WIN_WRITEDMA_EXT, -}; - -static const u8 ide_data_phases[] = { - TASKFILE_MULTI_IN, - TASKFILE_MULTI_OUT, - TASKFILE_IN, - TASKFILE_OUT, - TASKFILE_IN_DMA, - TASKFILE_OUT_DMA, -}; - -static void ide_tf_set_cmd(ide_drive_t *drive, ide_task_t *task, u8 dma) -{ - u8 index, lba48, write; - - lba48 = (task->tf_flags & IDE_TFLAG_LBA48) ? 2 : 0; - write = (task->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0; - - if (dma) - index = drive->vdma ? 4 : 8; - else - index = drive->mult_count ? 0 : 4; - - task->tf.command = ide_rw_cmds[index + lba48 + write]; - - if (dma) - index = 8; /* fixup index */ - - task->data_phase = ide_data_phases[index / 2 + write]; -} - /* * __ide_do_rw_disk() issues READ and WRITE commands to a disk, * using LBA if supported, or CHS otherwise, to address sectors. @@ -181,11 +137,11 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq, { ide_hwif_t *hwif = HWIF(drive); unsigned int dma = drive->using_dma; - u16 nsectors = (u16)rq->nr_sectors; u8 lba48 = (drive->addressing == 1) ? 1 : 0; - ide_task_t task; - struct ide_taskfile *tf = &task.tf; - ide_startstop_t rc; + task_ioreg_t command = WIN_NOP; + ata_nsector_t nsectors; + + nsectors.all = (u16) rq->nr_sectors; if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) { if (block + rq->nr_sectors > 1ULL << 28) @@ -199,76 +155,121 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq, ide_map_sg(drive, rq); } - memset(&task, 0, sizeof(task)); - task.tf_flags = IDE_TFLAG_NO_SELECT_MASK; /* FIXME? */ - task.tf_flags |= (IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE); + if (IDE_CONTROL_REG) + hwif->OUTB(drive->ctl, IDE_CONTROL_REG); + + /* FIXME: SELECT_MASK(drive, 0) ? 
*/ if (drive->select.b.lba) { if (lba48) { + task_ioreg_t tasklets[10]; + pr_debug("%s: LBA=0x%012llx\n", drive->name, (unsigned long long)block); - tf->hob_nsect = (nsectors >> 8) & 0xff; - tf->hob_lbal = (u8)(block >> 24); - if (sizeof(block) != 4) { - tf->hob_lbam = (u8)((u64)block >> 32); - tf->hob_lbah = (u8)((u64)block >> 40); + tasklets[0] = 0; + tasklets[1] = 0; + tasklets[2] = nsectors.b.low; + tasklets[3] = nsectors.b.high; + tasklets[4] = (task_ioreg_t) block; + tasklets[5] = (task_ioreg_t) (block>>8); + tasklets[6] = (task_ioreg_t) (block>>16); + tasklets[7] = (task_ioreg_t) (block>>24); + if (sizeof(block) == 4) { + tasklets[8] = (task_ioreg_t) 0; + tasklets[9] = (task_ioreg_t) 0; + } else { + tasklets[8] = (task_ioreg_t)((u64)block >> 32); + tasklets[9] = (task_ioreg_t)((u64)block >> 40); } - - tf->nsect = nsectors & 0xff; - tf->lbal = (u8) block; - tf->lbam = (u8)(block >> 8); - tf->lbah = (u8)(block >> 16); #ifdef DEBUG printk("%s: 0x%02x%02x 0x%02x%02x%02x%02x%02x%02x\n", - drive->name, tf->hob_nsect, tf->nsect, - tf->hob_lbah, tf->hob_lbam, tf->hob_lbal, - tf->lbah, tf->lbam, tf->lbal); + drive->name, tasklets[3], tasklets[2], + tasklets[9], tasklets[8], tasklets[7], + tasklets[6], tasklets[5], tasklets[4]); #endif - task.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_OUT_HOB); + hwif->OUTB(tasklets[1], IDE_FEATURE_REG); + hwif->OUTB(tasklets[3], IDE_NSECTOR_REG); + hwif->OUTB(tasklets[7], IDE_SECTOR_REG); + hwif->OUTB(tasklets[8], IDE_LCYL_REG); + hwif->OUTB(tasklets[9], IDE_HCYL_REG); + + hwif->OUTB(tasklets[0], IDE_FEATURE_REG); + hwif->OUTB(tasklets[2], IDE_NSECTOR_REG); + hwif->OUTB(tasklets[4], IDE_SECTOR_REG); + hwif->OUTB(tasklets[5], IDE_LCYL_REG); + hwif->OUTB(tasklets[6], IDE_HCYL_REG); + hwif->OUTB(0x00|drive->select.all,IDE_SELECT_REG); } else { - tf->nsect = nsectors & 0xff; - tf->lbal = block; - tf->lbam = block >>= 8; - tf->lbah = block >>= 8; - tf->device = (block >> 8) & 0xf; + hwif->OUTB(0x00, IDE_FEATURE_REG); + hwif->OUTB(nsectors.b.low, IDE_NSECTOR_REG); + hwif->OUTB(block, IDE_SECTOR_REG); + hwif->OUTB(block>>=8, IDE_LCYL_REG); + hwif->OUTB(block>>=8, IDE_HCYL_REG); + hwif->OUTB(((block>>8)&0x0f)|drive->select.all,IDE_SELECT_REG); } } else { unsigned int sect,head,cyl,track; track = (int)block / drive->sect; sect = (int)block % drive->sect + 1; + hwif->OUTB(sect, IDE_SECTOR_REG); head = track % drive->head; cyl = track / drive->head; pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect); - tf->nsect = nsectors & 0xff; - tf->lbal = sect; - tf->lbam = cyl; - tf->lbah = cyl >> 8; - tf->device = head; + hwif->OUTB(0x00, IDE_FEATURE_REG); + hwif->OUTB(nsectors.b.low, IDE_NSECTOR_REG); + hwif->OUTB(cyl, IDE_LCYL_REG); + hwif->OUTB(cyl>>8, IDE_HCYL_REG); + hwif->OUTB(head|drive->select.all,IDE_SELECT_REG); } - if (rq_data_dir(rq)) - task.tf_flags |= IDE_TFLAG_WRITE; - - ide_tf_set_cmd(drive, &task, dma); - if (!dma) - hwif->data_phase = task.data_phase; - task.rq = rq; - - rc = do_rw_taskfile(drive, &task); - - if (rc == ide_stopped && dma) { + if (dma) { + if (!hwif->dma_setup(drive)) { + if (rq_data_dir(rq)) { + command = lba48 ? WIN_WRITEDMA_EXT : WIN_WRITEDMA; + if (drive->vdma) + command = lba48 ? WIN_WRITE_EXT: WIN_WRITE; + } else { + command = lba48 ? WIN_READDMA_EXT : WIN_READDMA; + if (drive->vdma) + command = lba48 ? 
WIN_READ_EXT: WIN_READ; + } + hwif->dma_exec_cmd(drive, command); + hwif->dma_start(drive); + return ide_started; + } /* fallback to PIO */ - task.tf_flags |= IDE_TFLAG_DMA_PIO_FALLBACK; - ide_tf_set_cmd(drive, &task, 0); - hwif->data_phase = task.data_phase; ide_init_sg_cmd(drive, rq); - rc = do_rw_taskfile(drive, &task); } - return rc; + if (rq_data_dir(rq) == READ) { + + if (drive->mult_count) { + hwif->data_phase = TASKFILE_MULTI_IN; + command = lba48 ? WIN_MULTREAD_EXT : WIN_MULTREAD; + } else { + hwif->data_phase = TASKFILE_IN; + command = lba48 ? WIN_READ_EXT : WIN_READ; + } + + ide_execute_command(drive, command, &task_in_intr, WAIT_CMD, NULL); + return ide_started; + } else { + if (drive->mult_count) { + hwif->data_phase = TASKFILE_MULTI_OUT; + command = lba48 ? WIN_MULTWRITE_EXT : WIN_MULTWRITE; + } else { + hwif->data_phase = TASKFILE_OUT; + command = lba48 ? WIN_WRITE_EXT : WIN_WRITE; + } + + /* FIXME: ->OUTBSYNC ? */ + hwif->OUTB(command, IDE_COMMAND_REG); + + return pre_task_out_intr(drive, rq); + } } /* @@ -306,29 +307,57 @@ static ide_startstop_t ide_do_rw_disk (ide_drive_t *drive, struct request *rq, s * Queries for true maximum capacity of the drive. * Returns maximum LBA address (> 0) of the drive, 0 if failed. */ -static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48) +static unsigned long idedisk_read_native_max_address(ide_drive_t *drive) { ide_task_t args; - struct ide_taskfile *tf = &args.tf; - u64 addr = 0; + unsigned long addr = 0; /* Create IDE/ATA command request structure */ memset(&args, 0, sizeof(ide_task_t)); - if (lba48) - tf->command = WIN_READ_NATIVE_MAX_EXT; - else - tf->command = WIN_READ_NATIVE_MAX; - tf->device = ATA_LBA; - args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; - if (lba48) - args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_OUT_HOB); + args.tfRegister[IDE_SELECT_OFFSET] = 0x40; + args.tfRegister[IDE_COMMAND_OFFSET] = WIN_READ_NATIVE_MAX; + args.command_type = IDE_DRIVE_TASK_NO_DATA; + args.handler = &task_no_data_intr; /* submit command request */ - ide_no_data_taskfile(drive, &args); + ide_raw_taskfile(drive, &args, NULL); /* if OK, compute maximum address value */ - if ((tf->status & 0x01) == 0) - addr = ide_get_lba_addr(tf, lba48) + 1; + if ((args.tfRegister[IDE_STATUS_OFFSET] & 0x01) == 0) { + addr = ((args.tfRegister[IDE_SELECT_OFFSET] & 0x0f) << 24) + | ((args.tfRegister[ IDE_HCYL_OFFSET] ) << 16) + | ((args.tfRegister[ IDE_LCYL_OFFSET] ) << 8) + | ((args.tfRegister[IDE_SECTOR_OFFSET] )); + addr++; /* since the return value is (maxlba - 1), we add 1 */ + } + return addr; +} + +static unsigned long long idedisk_read_native_max_address_ext(ide_drive_t *drive) +{ + ide_task_t args; + unsigned long long addr = 0; + + /* Create IDE/ATA command request structure */ + memset(&args, 0, sizeof(ide_task_t)); + args.tfRegister[IDE_SELECT_OFFSET] = 0x40; + args.tfRegister[IDE_COMMAND_OFFSET] = WIN_READ_NATIVE_MAX_EXT; + args.command_type = IDE_DRIVE_TASK_NO_DATA; + args.handler = &task_no_data_intr; + /* submit command request */ + ide_raw_taskfile(drive, &args, NULL); + + /* if OK, compute maximum address value */ + if ((args.tfRegister[IDE_STATUS_OFFSET] & 0x01) == 0) { + u32 high = (args.hobRegister[IDE_HCYL_OFFSET] << 16) | + (args.hobRegister[IDE_LCYL_OFFSET] << 8) | + args.hobRegister[IDE_SECTOR_OFFSET]; + u32 low = ((args.tfRegister[IDE_HCYL_OFFSET])<<16) | + ((args.tfRegister[IDE_LCYL_OFFSET])<<8) | + (args.tfRegister[IDE_SECTOR_OFFSET]); + addr = ((__u64)high << 24) | low; + addr++; /* since the return value is 
(maxlba - 1), we add 1 */ + } return addr; } @@ -336,37 +365,67 @@ static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48) * Sets maximum virtual LBA address of the drive. * Returns new maximum virtual LBA address (> 0) or 0 on failure. */ -static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48) +static unsigned long idedisk_set_max_address(ide_drive_t *drive, unsigned long addr_req) { ide_task_t args; - struct ide_taskfile *tf = &args.tf; - u64 addr_set = 0; - + unsigned long addr_set = 0; + addr_req--; /* Create IDE/ATA command request structure */ memset(&args, 0, sizeof(ide_task_t)); - tf->lbal = (addr_req >> 0) & 0xff; - tf->lbam = (addr_req >>= 8) & 0xff; - tf->lbah = (addr_req >>= 8) & 0xff; - if (lba48) { - tf->hob_lbal = (addr_req >>= 8) & 0xff; - tf->hob_lbam = (addr_req >>= 8) & 0xff; - tf->hob_lbah = (addr_req >>= 8) & 0xff; - tf->command = WIN_SET_MAX_EXT; - } else { - tf->device = (addr_req >>= 8) & 0x0f; - tf->command = WIN_SET_MAX; + args.tfRegister[IDE_SECTOR_OFFSET] = ((addr_req >> 0) & 0xff); + args.tfRegister[IDE_LCYL_OFFSET] = ((addr_req >> 8) & 0xff); + args.tfRegister[IDE_HCYL_OFFSET] = ((addr_req >> 16) & 0xff); + args.tfRegister[IDE_SELECT_OFFSET] = ((addr_req >> 24) & 0x0f) | 0x40; + args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SET_MAX; + args.command_type = IDE_DRIVE_TASK_NO_DATA; + args.handler = &task_no_data_intr; + /* submit command request */ + ide_raw_taskfile(drive, &args, NULL); + /* if OK, read new maximum address value */ + if ((args.tfRegister[IDE_STATUS_OFFSET] & 0x01) == 0) { + addr_set = ((args.tfRegister[IDE_SELECT_OFFSET] & 0x0f) << 24) + | ((args.tfRegister[ IDE_HCYL_OFFSET] ) << 16) + | ((args.tfRegister[ IDE_LCYL_OFFSET] ) << 8) + | ((args.tfRegister[IDE_SECTOR_OFFSET] )); + addr_set++; } - tf->device |= ATA_LBA; - args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; - if (lba48) - args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_OUT_HOB); + return addr_set; +} + +static unsigned long long idedisk_set_max_address_ext(ide_drive_t *drive, unsigned long long addr_req) +{ + ide_task_t args; + unsigned long long addr_set = 0; + + addr_req--; + /* Create IDE/ATA command request structure */ + memset(&args, 0, sizeof(ide_task_t)); + args.tfRegister[IDE_SECTOR_OFFSET] = ((addr_req >> 0) & 0xff); + args.tfRegister[IDE_LCYL_OFFSET] = ((addr_req >>= 8) & 0xff); + args.tfRegister[IDE_HCYL_OFFSET] = ((addr_req >>= 8) & 0xff); + args.tfRegister[IDE_SELECT_OFFSET] = 0x40; + args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SET_MAX_EXT; + args.hobRegister[IDE_SECTOR_OFFSET] = (addr_req >>= 8) & 0xff; + args.hobRegister[IDE_LCYL_OFFSET] = (addr_req >>= 8) & 0xff; + args.hobRegister[IDE_HCYL_OFFSET] = (addr_req >>= 8) & 0xff; + args.hobRegister[IDE_SELECT_OFFSET] = 0x40; + args.hobRegister[IDE_CONTROL_OFFSET_HOB]= (drive->ctl|0x80); + args.command_type = IDE_DRIVE_TASK_NO_DATA; + args.handler = &task_no_data_intr; /* submit command request */ - ide_no_data_taskfile(drive, &args); + ide_raw_taskfile(drive, &args, NULL); /* if OK, compute maximum address value */ - if ((tf->status & 0x01) == 0) - addr_set = ide_get_lba_addr(tf, lba48) + 1; - + if ((args.tfRegister[IDE_STATUS_OFFSET] & 0x01) == 0) { + u32 high = (args.hobRegister[IDE_HCYL_OFFSET] << 16) | + (args.hobRegister[IDE_LCYL_OFFSET] << 8) | + args.hobRegister[IDE_SECTOR_OFFSET]; + u32 low = ((args.tfRegister[IDE_HCYL_OFFSET])<<16) | + ((args.tfRegister[IDE_LCYL_OFFSET])<<8) | + (args.tfRegister[IDE_SECTOR_OFFSET]); + addr_set = ((__u64)high << 24) | low; + addr_set++; + } 
return addr_set; } @@ -412,8 +471,10 @@ static void idedisk_check_hpa(ide_drive_t *drive) int lba48 = idedisk_supports_lba48(drive->id); capacity = drive->capacity64; - - set_max = idedisk_read_native_max_address(drive, lba48); + if (lba48) + set_max = idedisk_read_native_max_address_ext(drive); + else + set_max = idedisk_read_native_max_address(drive); if (ide_in_drive_list(drive->id, hpa_list)) { /* @@ -434,8 +495,10 @@ static void idedisk_check_hpa(ide_drive_t *drive) capacity, sectors_to_MB(capacity), set_max, sectors_to_MB(set_max)); - set_max = idedisk_set_max_address(drive, set_max, lba48); - + if (lba48) + set_max = idedisk_set_max_address_ext(drive, set_max); + else + set_max = idedisk_set_max_address(drive, set_max); if (set_max) { drive->capacity64 = set_max; printk(KERN_INFO "%s: Host Protected Area disabled.\n", @@ -493,32 +556,32 @@ static sector_t idedisk_capacity (ide_drive_t *drive) static int smart_enable(ide_drive_t *drive) { ide_task_t args; - struct ide_taskfile *tf = &args.tf; memset(&args, 0, sizeof(ide_task_t)); - tf->feature = SMART_ENABLE; - tf->lbam = SMART_LCYL_PASS; - tf->lbah = SMART_HCYL_PASS; - tf->command = WIN_SMART; - args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; - return ide_no_data_taskfile(drive, &args); + args.tfRegister[IDE_FEATURE_OFFSET] = SMART_ENABLE; + args.tfRegister[IDE_LCYL_OFFSET] = SMART_LCYL_PASS; + args.tfRegister[IDE_HCYL_OFFSET] = SMART_HCYL_PASS; + args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SMART; + args.command_type = IDE_DRIVE_TASK_NO_DATA; + args.handler = &task_no_data_intr; + return ide_raw_taskfile(drive, &args, NULL); } static int get_smart_data(ide_drive_t *drive, u8 *buf, u8 sub_cmd) { ide_task_t args; - struct ide_taskfile *tf = &args.tf; memset(&args, 0, sizeof(ide_task_t)); - tf->feature = sub_cmd; - tf->nsect = 0x01; - tf->lbam = SMART_LCYL_PASS; - tf->lbah = SMART_HCYL_PASS; - tf->command = WIN_SMART; - args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; - args.data_phase = TASKFILE_IN; + args.tfRegister[IDE_FEATURE_OFFSET] = sub_cmd; + args.tfRegister[IDE_NSECTOR_OFFSET] = 0x01; + args.tfRegister[IDE_LCYL_OFFSET] = SMART_LCYL_PASS; + args.tfRegister[IDE_HCYL_OFFSET] = SMART_HCYL_PASS; + args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SMART; + args.command_type = IDE_DRIVE_TASK_IN; + args.data_phase = TASKFILE_IN; + args.handler = &task_in_intr; (void) smart_enable(drive); - return ide_raw_taskfile(drive, &args, buf, 1); + return ide_raw_taskfile(drive, &args, buf); } static int proc_idedisk_read_cache @@ -596,20 +659,19 @@ static ide_proc_entry_t idedisk_proc[] = { static void idedisk_prepare_flush(struct request_queue *q, struct request *rq) { ide_drive_t *drive = q->queuedata; - ide_task_t task; - memset(&task, 0, sizeof(task)); + memset(rq->cmd, 0, sizeof(rq->cmd)); + if (ide_id_has_flush_cache_ext(drive->id) && (drive->capacity64 >= (1UL << 28))) - task.tf.command = WIN_FLUSH_CACHE_EXT; + rq->cmd[0] = WIN_FLUSH_CACHE_EXT; else - task.tf.command = WIN_FLUSH_CACHE; - task.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; - task.data_phase = TASKFILE_NO_DATA; + rq->cmd[0] = WIN_FLUSH_CACHE; + - rq->cmd_type = REQ_TYPE_ATA_TASKFILE; + rq->cmd_type = REQ_TYPE_ATA_TASK; rq->cmd_flags |= REQ_SOFTBARRIER; - rq->special = &task; + rq->buffer = rq->cmd; } /* @@ -691,11 +753,12 @@ static int write_cache(ide_drive_t *drive, int arg) if (ide_id_has_flush_cache(drive->id)) { memset(&args, 0, sizeof(ide_task_t)); - args.tf.feature = arg ? + args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ? 
SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE; - args.tf.command = WIN_SETFEATURES; - args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; - err = ide_no_data_taskfile(drive, &args); + args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES; + args.command_type = IDE_DRIVE_TASK_NO_DATA; + args.handler = &task_no_data_intr; + err = ide_raw_taskfile(drive, &args, NULL); if (err == 0) drive->wcache = arg; } @@ -711,11 +774,12 @@ static int do_idedisk_flushcache (ide_drive_t *drive) memset(&args, 0, sizeof(ide_task_t)); if (ide_id_has_flush_cache_ext(drive->id)) - args.tf.command = WIN_FLUSH_CACHE_EXT; + args.tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE_EXT; else - args.tf.command = WIN_FLUSH_CACHE; - args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; - return ide_no_data_taskfile(drive, &args); + args.tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE; + args.command_type = IDE_DRIVE_TASK_NO_DATA; + args.handler = &task_no_data_intr; + return ide_raw_taskfile(drive, &args, NULL); } static int set_acoustic (ide_drive_t *drive, int arg) @@ -726,11 +790,13 @@ static int set_acoustic (ide_drive_t *drive, int arg) return -EINVAL; memset(&args, 0, sizeof(ide_task_t)); - args.tf.feature = arg ? SETFEATURES_EN_AAM : SETFEATURES_DIS_AAM; - args.tf.nsect = arg; - args.tf.command = WIN_SETFEATURES; - args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; - ide_no_data_taskfile(drive, &args); + args.tfRegister[IDE_FEATURE_OFFSET] = (arg) ? SETFEATURES_EN_AAM : + SETFEATURES_DIS_AAM; + args.tfRegister[IDE_NSECTOR_OFFSET] = arg; + args.tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES; + args.command_type = IDE_DRIVE_TASK_NO_DATA; + args.handler = &task_no_data_intr; + ide_raw_taskfile(drive, &args, NULL); drive->acoustic = arg; return 0; } @@ -991,15 +1057,16 @@ static int idedisk_open(struct inode *inode, struct file *filp) if (drive->removable && idkp->openers == 1) { ide_task_t args; memset(&args, 0, sizeof(ide_task_t)); - args.tf.command = WIN_DOORLOCK; - args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; + args.tfRegister[IDE_COMMAND_OFFSET] = WIN_DOORLOCK; + args.command_type = IDE_DRIVE_TASK_NO_DATA; + args.handler = &task_no_data_intr; check_disk_change(inode->i_bdev); /* * Ignore the return code from door_lock, * since the open() has already succeeded, * and the door_lock is irrelevant at this point. 
*/ - if (drive->doorlocking && ide_no_data_taskfile(drive, &args)) + if (drive->doorlocking && ide_raw_taskfile(drive, &args, NULL)) drive->doorlocking = 0; } return 0; @@ -1017,9 +1084,10 @@ static int idedisk_release(struct inode *inode, struct file *filp) if (drive->removable && idkp->openers == 1) { ide_task_t args; memset(&args, 0, sizeof(ide_task_t)); - args.tf.command = WIN_DOORUNLOCK; - args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; - if (drive->doorlocking && ide_no_data_taskfile(drive, &args)) + args.tfRegister[IDE_COMMAND_OFFSET] = WIN_DOORUNLOCK; + args.command_type = IDE_DRIVE_TASK_NO_DATA; + args.handler = &task_no_data_intr; + if (drive->doorlocking && ide_raw_taskfile(drive, &args, NULL)) drive->doorlocking = 0; } diff --git a/trunk/drivers/ide/ide-dma.c b/trunk/drivers/ide/ide-dma.c index 18c78ad2b31e..4703837bf1fc 100644 --- a/trunk/drivers/ide/ide-dma.c +++ b/trunk/drivers/ide/ide-dma.c @@ -491,6 +491,10 @@ EXPORT_SYMBOL(ide_dma_host_on); int __ide_dma_on (ide_drive_t *drive) { + /* consult the list of known "bad" drives */ + if (__ide_dma_bad_drive(drive)) + return 1; + drive->using_dma = 1; ide_toggle_bounce(drive, 1); @@ -823,19 +827,22 @@ int ide_set_dma(ide_drive_t *drive) ide_hwif_t *hwif = drive->hwif; int rc; - /* - * Force DMAing for the beginning of the check. - * Some chipsets appear to do interesting - * things, if not checked and cleared. - * PARANOIA!!! - */ - hwif->dma_off_quietly(drive); - rc = ide_dma_check(drive); - if (rc) - return rc; - return hwif->ide_dma_on(drive); + switch(rc) { + case -1: /* DMA needs to be disabled */ + hwif->dma_off_quietly(drive); + return -1; + case 0: /* DMA needs to be enabled */ + return hwif->ide_dma_on(drive); + case 1: /* DMA setting cannot be changed */ + break; + default: + BUG(); + break; + } + + return rc; } #ifdef CONFIG_BLK_DEV_IDEDMA_PCI @@ -961,6 +968,11 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base, unsigned num_ports) hwif->dma_base = base; + if (hwif->mate) + hwif->dma_master = hwif->channel ? hwif->mate->dma_base : base; + else + hwif->dma_master = base; + if (!(hwif->dma_command)) hwif->dma_command = hwif->dma_base; if (!(hwif->dma_vendor1)) @@ -1002,6 +1014,8 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base, unsigned num_ports) hwif->drives[1].name, (dma_stat & 0x40) ? "DMA" : "pio"); } printk("\n"); + + BUG_ON(!hwif->dma_master); } EXPORT_SYMBOL_GPL(ide_setup_dma); diff --git a/trunk/drivers/ide/ide-floppy.c b/trunk/drivers/ide/ide-floppy.c index ff8232ef9659..04a357808f2e 100644 --- a/trunk/drivers/ide/ide-floppy.c +++ b/trunk/drivers/ide/ide-floppy.c @@ -369,6 +369,27 @@ typedef struct ide_floppy_obj { #define IDEFLOPPY_IOCTL_FORMAT_START 0x4602 #define IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS 0x4603 +#if 0 +/* + * Special requests for our block device strategy routine. + */ +#define IDEFLOPPY_FIRST_RQ 90 + +/* + * IDEFLOPPY_PC_RQ is used to queue a packet command in the request queue. + */ +#define IDEFLOPPY_PC_RQ 90 + +#define IDEFLOPPY_LAST_RQ 90 + +/* + * A macro which can be used to check if a given request command + * originated in the driver or in the buffer cache layer. + */ +#define IDEFLOPPY_RQ_CMD(cmd) ((cmd >= IDEFLOPPY_FIRST_RQ) && (cmd <= IDEFLOPPY_LAST_RQ)) + +#endif + /* * Error codes which are returned in rq->errors to the higher part * of the driver. 
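The ide-disk and ide-io hunks above repeatedly make the same conversion: from the ide_taskfile form (tf->command plus IDE_TFLAG_* flags, issued via ide_no_data_taskfile()) to the older tfRegister[] form, where the caller also selects command_type and the completion handler explicitly. Below is a minimal sketch of a no-data command in that older form, condensed from the flush-cache hunks; it is kernel-internal code, builds only in-tree against the <linux/ide.h> of this generation, and mirrors do_idedisk_flushcache() rather than adding anything new.

	/* Flush the drive's write cache with the tfRegister[] interface. */
	#include <linux/ide.h>
	#include <linux/hdreg.h>
	#include <linux/string.h>

	static int flush_cache_sketch(ide_drive_t *drive)
	{
		ide_task_t args;

		memset(&args, 0, sizeof(ide_task_t));
		/* taskfile registers are addressed by offset into tfRegister[] */
		args.tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE;
		/* no data phase, so the generic no-data handler completes it */
		args.command_type = IDE_DRIVE_TASK_NO_DATA;
		args.handler = &task_no_data_intr;
		/* NULL: no data buffer to transfer */
		return ide_raw_taskfile(drive, &args, NULL);
	}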
@@ -772,8 +793,9 @@ static void idefloppy_retry_pc (ide_drive_t *drive) { idefloppy_pc_t *pc; struct request *rq; + atapi_error_t error; - (void)drive->hwif->INB(IDE_ERROR_REG); + error.all = HWIF(drive)->INB(IDE_ERROR_REG); pc = idefloppy_next_pc_storage(drive); rq = idefloppy_next_rq_storage(drive); idefloppy_create_request_sense_cmd(pc); @@ -787,12 +809,12 @@ static void idefloppy_retry_pc (ide_drive_t *drive) static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive) { idefloppy_floppy_t *floppy = drive->driver_data; - ide_hwif_t *hwif = drive->hwif; + atapi_status_t status; + atapi_bcount_t bcount; + atapi_ireason_t ireason; idefloppy_pc_t *pc = floppy->pc; struct request *rq = pc->rq; unsigned int temp; - u16 bcount; - u8 stat, ireason; debug_log(KERN_INFO "ide-floppy: Reached %s interrupt handler\n", __FUNCTION__); @@ -808,16 +830,16 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive) } /* Clear the interrupt */ - stat = drive->hwif->INB(IDE_STATUS_REG); + status.all = HWIF(drive)->INB(IDE_STATUS_REG); - if ((stat & DRQ_STAT) == 0) { /* No more interrupts */ + if (!status.b.drq) { /* No more interrupts */ debug_log(KERN_INFO "Packet command completed, %d bytes " "transferred\n", pc->actually_transferred); clear_bit(PC_DMA_IN_PROGRESS, &pc->flags); local_irq_enable_in_hardirq(); - if ((stat & ERR_STAT) || test_bit(PC_DMA_ERROR, &pc->flags)) { + if (status.b.check || test_bit(PC_DMA_ERROR, &pc->flags)) { /* Error detected */ debug_log(KERN_INFO "ide-floppy: %s: I/O error\n", drive->name); @@ -848,32 +870,32 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive) } /* Get the number of bytes to transfer */ - bcount = (hwif->INB(IDE_BCOUNTH_REG) << 8) | - hwif->INB(IDE_BCOUNTL_REG); + bcount.b.high = HWIF(drive)->INB(IDE_BCOUNTH_REG); + bcount.b.low = HWIF(drive)->INB(IDE_BCOUNTL_REG); /* on this interrupt */ - ireason = hwif->INB(IDE_IREASON_REG); + ireason.all = HWIF(drive)->INB(IDE_IREASON_REG); - if (ireason & CD) { + if (ireason.b.cod) { printk(KERN_ERR "ide-floppy: CoD != 0 in idefloppy_pc_intr\n"); return ide_do_reset(drive); } - if (((ireason & IO) == IO) == test_bit(PC_WRITING, &pc->flags)) { + if (ireason.b.io == test_bit(PC_WRITING, &pc->flags)) { /* Hopefully, we will never get here */ printk(KERN_ERR "ide-floppy: We wanted to %s, ", - (ireason & IO) ? "Write" : "Read"); + ireason.b.io ? "Write":"Read"); printk(KERN_ERR "but the floppy wants us to %s !\n", - (ireason & IO) ? "Read" : "Write"); + ireason.b.io ? 
"Read":"Write"); return ide_do_reset(drive); } if (!test_bit(PC_WRITING, &pc->flags)) { /* Reading - Check that we have enough space */ - temp = pc->actually_transferred + bcount; + temp = pc->actually_transferred + bcount.all; if (temp > pc->request_transfer) { if (temp > pc->buffer_size) { printk(KERN_ERR "ide-floppy: The floppy wants " "to send us more data than expected " "- discarding data\n"); - idefloppy_discard_data(drive, bcount); + idefloppy_discard_data(drive,bcount.all); BUG_ON(HWGROUP(drive)->handler != NULL); ide_set_handler(drive, &idefloppy_pc_intr, @@ -889,21 +911,23 @@ static ide_startstop_t idefloppy_pc_intr (ide_drive_t *drive) if (test_bit(PC_WRITING, &pc->flags)) { if (pc->buffer != NULL) /* Write the current buffer */ - hwif->atapi_output_bytes(drive, pc->current_position, - bcount); + HWIF(drive)->atapi_output_bytes(drive, + pc->current_position, + bcount.all); else - idefloppy_output_buffers(drive, pc, bcount); + idefloppy_output_buffers(drive, pc, bcount.all); } else { if (pc->buffer != NULL) /* Read the current buffer */ - hwif->atapi_input_bytes(drive, pc->current_position, - bcount); + HWIF(drive)->atapi_input_bytes(drive, + pc->current_position, + bcount.all); else - idefloppy_input_buffers(drive, pc, bcount); + idefloppy_input_buffers(drive, pc, bcount.all); } /* Update the current position */ - pc->actually_transferred += bcount; - pc->current_position += bcount; + pc->actually_transferred += bcount.all; + pc->current_position += bcount.all; BUG_ON(HWGROUP(drive)->handler != NULL); ide_set_handler(drive, &idefloppy_pc_intr, IDEFLOPPY_WAIT_CMD, NULL); /* And set the interrupt handler again */ @@ -919,15 +943,15 @@ static ide_startstop_t idefloppy_transfer_pc (ide_drive_t *drive) { ide_startstop_t startstop; idefloppy_floppy_t *floppy = drive->driver_data; - u8 ireason; + atapi_ireason_t ireason; if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) { printk(KERN_ERR "ide-floppy: Strange, packet command " "initiated yet DRQ isn't asserted\n"); return startstop; } - ireason = drive->hwif->INB(IDE_IREASON_REG); - if ((ireason & CD) == 0 || (ireason & IO)) { + ireason.all = HWIF(drive)->INB(IDE_IREASON_REG); + if (!ireason.b.cod || ireason.b.io) { printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) while " "issuing a packet command\n"); return ide_do_reset(drive); @@ -967,15 +991,15 @@ static ide_startstop_t idefloppy_transfer_pc1 (ide_drive_t *drive) { idefloppy_floppy_t *floppy = drive->driver_data; ide_startstop_t startstop; - u8 ireason; + atapi_ireason_t ireason; if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) { printk(KERN_ERR "ide-floppy: Strange, packet command " "initiated yet DRQ isn't asserted\n"); return startstop; } - ireason = drive->hwif->INB(IDE_IREASON_REG); - if ((ireason & CD) == 0 || (ireason & IO)) { + ireason.all = HWIF(drive)->INB(IDE_IREASON_REG); + if (!ireason.b.cod || ireason.b.io) { printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) " "while issuing a packet command\n"); return ide_do_reset(drive); @@ -1017,9 +1041,21 @@ static ide_startstop_t idefloppy_issue_pc (ide_drive_t *drive, idefloppy_pc_t *p { idefloppy_floppy_t *floppy = drive->driver_data; ide_hwif_t *hwif = drive->hwif; + atapi_feature_t feature; + atapi_bcount_t bcount; ide_handler_t *pkt_xfer_routine; - u16 bcount; - u8 dma; + +#if 0 /* Accessing floppy->pc is not valid here, the previous pc may be gone + and have lived on another thread's stack; that stack may have become + unmapped meanwhile (CONFIG_DEBUG_PAGEALLOC). 
*/ +#if IDEFLOPPY_DEBUG_BUGS + if (floppy->pc->c[0] == IDEFLOPPY_REQUEST_SENSE_CMD && + pc->c[0] == IDEFLOPPY_REQUEST_SENSE_CMD) { + printk(KERN_ERR "ide-floppy: possible ide-floppy.c bug - " + "Two request sense in serial were issued\n"); + } +#endif /* IDEFLOPPY_DEBUG_BUGS */ +#endif if (floppy->failed_pc == NULL && pc->c[0] != IDEFLOPPY_REQUEST_SENSE_CMD) @@ -1057,20 +1093,25 @@ static ide_startstop_t idefloppy_issue_pc (ide_drive_t *drive, idefloppy_pc_t *p /* We haven't transferred any data yet */ pc->actually_transferred = 0; pc->current_position = pc->buffer; - bcount = min(pc->request_transfer, 63 * 1024); + bcount.all = min(pc->request_transfer, 63 * 1024); if (test_and_clear_bit(PC_DMA_ERROR, &pc->flags)) ide_dma_off(drive); - dma = 0; + feature.all = 0; if (test_bit(PC_DMA_RECOMMENDED, &pc->flags) && drive->using_dma) - dma = !hwif->dma_setup(drive); + feature.b.dma = !hwif->dma_setup(drive); - ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK | - IDE_TFLAG_OUT_DEVICE, bcount, dma); + if (IDE_CONTROL_REG) + HWIF(drive)->OUTB(drive->ctl, IDE_CONTROL_REG); + /* Use PIO/DMA */ + HWIF(drive)->OUTB(feature.all, IDE_FEATURE_REG); + HWIF(drive)->OUTB(bcount.b.high, IDE_BCOUNTH_REG); + HWIF(drive)->OUTB(bcount.b.low, IDE_BCOUNTL_REG); + HWIF(drive)->OUTB(drive->select.all, IDE_SELECT_REG); - if (dma) { /* Begin DMA, if necessary */ + if (feature.b.dma) { /* Begin DMA, if necessary */ set_bit(PC_DMA_IN_PROGRESS, &pc->flags); hwif->dma_start(drive); } @@ -1624,14 +1665,14 @@ static int idefloppy_get_format_progress(ide_drive_t *drive, int __user *arg) /* Else assume format_unit has finished, and we're ** at 0x10000 */ } else { + atapi_status_t status; unsigned long flags; - u8 stat; local_irq_save(flags); - stat = drive->hwif->INB(IDE_STATUS_REG); + status.all = HWIF(drive)->INB(IDE_STATUS_REG); local_irq_restore(flags); - progress_indication = ((stat & SEEK_STAT) == 0) ? 0 : 0x10000; + progress_indication = !status.b.dsc ? 
0 : 0x10000; } if (put_user(progress_indication, arg)) return (-EFAULT); diff --git a/trunk/drivers/ide/ide-io.c b/trunk/drivers/ide/ide-io.c index 2711b5a6962d..bef781fec500 100644 --- a/trunk/drivers/ide/ide-io.c +++ b/trunk/drivers/ide/ide-io.c @@ -189,14 +189,18 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request * return ide_stopped; } if (ide_id_has_flush_cache_ext(drive->id)) - args->tf.command = WIN_FLUSH_CACHE_EXT; + args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE_EXT; else - args->tf.command = WIN_FLUSH_CACHE; - goto out_do_tf; + args->tfRegister[IDE_COMMAND_OFFSET] = WIN_FLUSH_CACHE; + args->command_type = IDE_DRIVE_TASK_NO_DATA; + args->handler = &task_no_data_intr; + return do_rw_taskfile(drive, args); case idedisk_pm_standby: /* Suspend step 2 (standby) */ - args->tf.command = WIN_STANDBYNOW1; - goto out_do_tf; + args->tfRegister[IDE_COMMAND_OFFSET] = WIN_STANDBYNOW1; + args->command_type = IDE_DRIVE_TASK_NO_DATA; + args->handler = &task_no_data_intr; + return do_rw_taskfile(drive, args); case idedisk_pm_restore_pio: /* Resume step 1 (restore PIO) */ ide_set_max_pio(drive); @@ -210,8 +214,10 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request * return ide_stopped; case idedisk_pm_idle: /* Resume step 2 (idle) */ - args->tf.command = WIN_IDLEIMMEDIATE; - goto out_do_tf; + args->tfRegister[IDE_COMMAND_OFFSET] = WIN_IDLEIMMEDIATE; + args->command_type = IDE_DRIVE_TASK_NO_DATA; + args->handler = task_no_data_intr; + return do_rw_taskfile(drive, args); case ide_pm_restore_dma: /* Resume step 3 (restore DMA) */ /* @@ -221,6 +227,7 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request * */ if (drive->hwif->ide_dma_on == NULL) break; + drive->hwif->dma_off_quietly(drive); /* * TODO: respect ->using_dma setting */ @@ -229,11 +236,6 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request * } pm->pm_step = ide_pm_state_completed; return ide_stopped; - -out_do_tf: - args->tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; - args->data_phase = TASKFILE_NO_DATA; - return do_rw_taskfile(drive, args); } /** @@ -296,48 +298,6 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq) spin_unlock_irqrestore(&ide_lock, flags); } -void ide_tf_read(ide_drive_t *drive, ide_task_t *task) -{ - ide_hwif_t *hwif = drive->hwif; - struct ide_taskfile *tf = &task->tf; - - if (task->tf_flags & IDE_TFLAG_IN_DATA) { - u16 data = hwif->INW(IDE_DATA_REG); - - tf->data = data & 0xff; - tf->hob_data = (data >> 8) & 0xff; - } - - /* be sure we're looking at the low order bits */ - hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG); - - if (task->tf_flags & IDE_TFLAG_IN_NSECT) - tf->nsect = hwif->INB(IDE_NSECTOR_REG); - if (task->tf_flags & IDE_TFLAG_IN_LBAL) - tf->lbal = hwif->INB(IDE_SECTOR_REG); - if (task->tf_flags & IDE_TFLAG_IN_LBAM) - tf->lbam = hwif->INB(IDE_LCYL_REG); - if (task->tf_flags & IDE_TFLAG_IN_LBAH) - tf->lbah = hwif->INB(IDE_HCYL_REG); - if (task->tf_flags & IDE_TFLAG_IN_DEVICE) - tf->device = hwif->INB(IDE_SELECT_REG); - - if (task->tf_flags & IDE_TFLAG_LBA48) { - hwif->OUTB(drive->ctl | 0x80, IDE_CONTROL_REG); - - if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE) - tf->hob_feature = hwif->INB(IDE_FEATURE_REG); - if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT) - tf->hob_nsect = hwif->INB(IDE_NSECTOR_REG); - if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL) - tf->hob_lbal = hwif->INB(IDE_SECTOR_REG); - if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM) - tf->hob_lbam = 
hwif->INB(IDE_LCYL_REG); - if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH) - tf->hob_lbah = hwif->INB(IDE_HCYL_REG); - } -} - /** * ide_end_drive_cmd - end an explicit drive command * @drive: command @@ -372,22 +332,51 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err) args[1] = err; args[2] = hwif->INB(IDE_NSECTOR_REG); } + } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) { + u8 *args = (u8 *) rq->buffer; + if (rq->errors == 0) + rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); + + if (args) { + args[0] = stat; + args[1] = err; + /* be sure we're looking at the low order bits */ + hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG); + args[2] = hwif->INB(IDE_NSECTOR_REG); + args[3] = hwif->INB(IDE_SECTOR_REG); + args[4] = hwif->INB(IDE_LCYL_REG); + args[5] = hwif->INB(IDE_HCYL_REG); + args[6] = hwif->INB(IDE_SELECT_REG); + } } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { ide_task_t *args = (ide_task_t *) rq->special; if (rq->errors == 0) rq->errors = !OK_STAT(stat,READY_STAT,BAD_STAT); if (args) { - struct ide_taskfile *tf = &args->tf; - - tf->error = err; - tf->status = stat; - - args->tf_flags |= (IDE_TFLAG_IN_TF|IDE_TFLAG_IN_DEVICE); - if (args->tf_flags & IDE_TFLAG_LBA48) - args->tf_flags |= IDE_TFLAG_IN_HOB; - - ide_tf_read(drive, args); + if (args->tf_in_flags.b.data) { + u16 data = hwif->INW(IDE_DATA_REG); + args->tfRegister[IDE_DATA_OFFSET] = (data) & 0xFF; + args->hobRegister[IDE_DATA_OFFSET] = (data >> 8) & 0xFF; + } + args->tfRegister[IDE_ERROR_OFFSET] = err; + /* be sure we're looking at the low order bits */ + hwif->OUTB(drive->ctl & ~0x80, IDE_CONTROL_REG); + args->tfRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG); + args->tfRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG); + args->tfRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG); + args->tfRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG); + args->tfRegister[IDE_SELECT_OFFSET] = hwif->INB(IDE_SELECT_REG); + args->tfRegister[IDE_STATUS_OFFSET] = stat; + + if (drive->addressing == 1) { + hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG); + args->hobRegister[IDE_FEATURE_OFFSET] = hwif->INB(IDE_FEATURE_REG); + args->hobRegister[IDE_NSECTOR_OFFSET] = hwif->INB(IDE_NSECTOR_REG); + args->hobRegister[IDE_SECTOR_OFFSET] = hwif->INB(IDE_SECTOR_REG); + args->hobRegister[IDE_LCYL_OFFSET] = hwif->INB(IDE_LCYL_REG); + args->hobRegister[IDE_HCYL_OFFSET] = hwif->INB(IDE_HCYL_REG); + } } } else if (blk_pm_request(rq)) { struct request_pm_state *pm = rq->data; @@ -626,6 +615,28 @@ ide_startstop_t ide_abort(ide_drive_t *drive, const char *msg) return __ide_abort(drive, rq); } +/** + * ide_cmd - issue a simple drive command + * @drive: drive the command is for + * @cmd: command byte + * @nsect: sector byte + * @handler: handler for the command completion + * + * Issue a simple drive command with interrupts. + * The drive must be selected beforehand. 
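+ *
+ * (Illustrative sketch only, not from the original patch; feature,
+ * sector, cmd and nsect below are placeholder values.) A caller is
+ * expected to load any remaining taskfile registers itself before
+ * handing over the command byte, roughly:
+ *
+ *	hwif->OUTB(feature, IDE_FEATURE_REG);
+ *	hwif->OUTB(sector, IDE_SECTOR_REG);
+ *	ide_cmd(drive, cmd, nsect, &drive_cmd_intr);
+ *
+ * which is the pattern used by the REQ_TYPE_ATA_CMD and
+ * REQ_TYPE_ATA_TASK branches of execute_drive_cmd() below.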
+ */ + +static void ide_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, + ide_handler_t *handler) +{ + ide_hwif_t *hwif = HWIF(drive); + if (IDE_CONTROL_REG) + hwif->OUTB(drive->ctl,IDE_CONTROL_REG); /* clear nIEN */ + SELECT_MASK(drive,0); + hwif->OUTB(nsect,IDE_NSECTOR_REG); + ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL); +} + /** * drive_cmd_intr - drive command completion interrupt * @drive: drive the completion interrupt occurred on @@ -662,26 +673,32 @@ static ide_startstop_t drive_cmd_intr (ide_drive_t *drive) return ide_stopped; } -static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf) +static void ide_init_specify_cmd(ide_drive_t *drive, ide_task_t *task) { - tf->nsect = drive->sect; - tf->lbal = drive->sect; - tf->lbam = drive->cyl; - tf->lbah = drive->cyl >> 8; - tf->device = ((drive->head - 1) | drive->select.all) & ~ATA_LBA; - tf->command = WIN_SPECIFY; + task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect; + task->tfRegister[IDE_SECTOR_OFFSET] = drive->sect; + task->tfRegister[IDE_LCYL_OFFSET] = drive->cyl; + task->tfRegister[IDE_HCYL_OFFSET] = drive->cyl>>8; + task->tfRegister[IDE_SELECT_OFFSET] = ((drive->head-1)|drive->select.all)&0xBF; + task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SPECIFY; + + task->handler = &set_geometry_intr; } -static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf) +static void ide_init_restore_cmd(ide_drive_t *drive, ide_task_t *task) { - tf->nsect = drive->sect; - tf->command = WIN_RESTORE; + task->tfRegister[IDE_NSECTOR_OFFSET] = drive->sect; + task->tfRegister[IDE_COMMAND_OFFSET] = WIN_RESTORE; + + task->handler = &recal_intr; } -static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf) +static void ide_init_setmult_cmd(ide_drive_t *drive, ide_task_t *task) { - tf->nsect = drive->mult_req; - tf->command = WIN_SETMULT; + task->tfRegister[IDE_NSECTOR_OFFSET] = drive->mult_req; + task->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETMULT; + + task->handler = &set_multmode_intr; } static ide_startstop_t ide_disk_special(ide_drive_t *drive) @@ -690,19 +707,19 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive) ide_task_t args; memset(&args, 0, sizeof(ide_task_t)); - args.data_phase = TASKFILE_NO_DATA; + args.command_type = IDE_DRIVE_TASK_NO_DATA; if (s->b.set_geometry) { s->b.set_geometry = 0; - ide_tf_set_specify_cmd(drive, &args.tf); + ide_init_specify_cmd(drive, &args); } else if (s->b.recalibrate) { s->b.recalibrate = 0; - ide_tf_set_restore_cmd(drive, &args.tf); + ide_init_restore_cmd(drive, &args); } else if (s->b.set_multmode) { s->b.set_multmode = 0; if (drive->mult_req > drive->id->max_multsect) drive->mult_req = drive->id->max_multsect; - ide_tf_set_setmult_cmd(drive, &args.tf); + ide_init_setmult_cmd(drive, &args); } else if (s->all) { int special = s->all; s->all = 0; @@ -710,9 +727,6 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive) return ide_stopped; } - args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE | - IDE_TFLAG_CUSTOM_HANDLER; - do_rw_taskfile(drive, &args); return ide_started; @@ -847,17 +861,13 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, struct request *rq) { ide_hwif_t *hwif = HWIF(drive); - u8 *args = rq->buffer; - ide_task_t ltask; - struct ide_taskfile *tf = <ask.tf; - if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { - ide_task_t *task = rq->special; + ide_task_t *args = rq->special; - if (task == NULL) + if (!args) goto done; - hwif->data_phase = task->data_phase; + hwif->data_phase = args->data_phase; switch 
(hwif->data_phase) { case TASKFILE_MULTI_OUT: @@ -870,34 +880,55 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, break; } - return do_rw_taskfile(drive, task); - } - - if (args == NULL) - goto done; - - memset(<ask, 0, sizeof(ltask)); - if (rq->cmd_type == REQ_TYPE_ATA_CMD) { + if (args->tf_out_flags.all != 0) + return flagged_taskfile(drive, args); + return do_rw_taskfile(drive, args); + } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) { + u8 *args = rq->buffer; + + if (!args) + goto done; #ifdef DEBUG - printk("%s: DRIVE_CMD\n", drive->name); + printk("%s: DRIVE_TASK_CMD ", drive->name); + printk("cmd=0x%02x ", args[0]); + printk("fr=0x%02x ", args[1]); + printk("ns=0x%02x ", args[2]); + printk("sc=0x%02x ", args[3]); + printk("lcyl=0x%02x ", args[4]); + printk("hcyl=0x%02x ", args[5]); + printk("sel=0x%02x\n", args[6]); #endif - tf->feature = args[2]; - if (args[0] == WIN_SMART) { - tf->nsect = args[3]; - tf->lbal = args[1]; - tf->lbam = 0x4f; - tf->lbah = 0xc2; - ltask.tf_flags = IDE_TFLAG_OUT_TF; - } else { - tf->nsect = args[1]; - ltask.tf_flags = IDE_TFLAG_OUT_FEATURE | - IDE_TFLAG_OUT_NSECT; - } + hwif->OUTB(args[1], IDE_FEATURE_REG); + hwif->OUTB(args[3], IDE_SECTOR_REG); + hwif->OUTB(args[4], IDE_LCYL_REG); + hwif->OUTB(args[5], IDE_HCYL_REG); + hwif->OUTB((args[6] & 0xEF)|drive->select.all, IDE_SELECT_REG); + ide_cmd(drive, args[0], args[2], &drive_cmd_intr); + return ide_started; + } else if (rq->cmd_type == REQ_TYPE_ATA_CMD) { + u8 *args = rq->buffer; + + if (!args) + goto done; +#ifdef DEBUG + printk("%s: DRIVE_CMD ", drive->name); + printk("cmd=0x%02x ", args[0]); + printk("sc=0x%02x ", args[1]); + printk("fr=0x%02x ", args[2]); + printk("xx=0x%02x\n", args[3]); +#endif + if (args[0] == WIN_SMART) { + hwif->OUTB(0x4f, IDE_LCYL_REG); + hwif->OUTB(0xc2, IDE_HCYL_REG); + hwif->OUTB(args[2],IDE_FEATURE_REG); + hwif->OUTB(args[1],IDE_SECTOR_REG); + ide_cmd(drive, args[0], args[3], &drive_cmd_intr); + return ide_started; + } + hwif->OUTB(args[2],IDE_FEATURE_REG); + ide_cmd(drive, args[0], args[1], &drive_cmd_intr); + return ide_started; } - tf->command = args[0]; - ide_tf_load(drive, <ask); - ide_execute_command(drive, args[0], &drive_cmd_intr, WAIT_WORSTCASE, NULL); - return ide_started; done: /* @@ -972,7 +1003,6 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) /* bail early if we've exceeded max_failures */ if (drive->max_failures && (drive->failures > drive->max_failures)) { - rq->cmd_flags |= REQ_FAILED; goto kill_rq; } @@ -1005,6 +1035,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) ide_config_drive_speed(drive, drive->desired_speed); if (rq->cmd_type == REQ_TYPE_ATA_CMD || + rq->cmd_type == REQ_TYPE_ATA_TASK || rq->cmd_type == REQ_TYPE_ATA_TASKFILE) return execute_drive_cmd(drive, rq); else if (blk_pm_request(rq)) { @@ -1216,12 +1247,8 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq) if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif && hwif->io_ports[IDE_CONTROL_OFFSET]) { - /* - * set nIEN for previous hwif, drives in the - * quirk_list may not like intr setups/cleanups - */ - if (drive->quirk_list != 1) - hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG); + /* set nIEN for previous hwif */ + SELECT_INTERRUPT(drive); } hwgroup->hwif = hwif; hwgroup->drive = drive; @@ -1427,8 +1454,12 @@ void ide_timer_expiry (unsigned long data) */ spin_unlock(&ide_lock); hwif = HWIF(drive); +#if DISABLE_IRQ_NOSYNC + disable_irq_nosync(hwif->irq); +#else /* disable_irq_nosync ?? 
*/ disable_irq(hwif->irq); +#endif /* DISABLE_IRQ_NOSYNC */ /* local CPU only, * as if we were handling an interrupt */ local_irq_disable(); @@ -1754,19 +1785,3 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio } EXPORT_SYMBOL(ide_do_drive_cmd); - -void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma) -{ - ide_task_t task; - - memset(&task, 0, sizeof(task)); - task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM | - IDE_TFLAG_OUT_FEATURE | tf_flags; - task.tf.feature = dma; /* Use PIO/DMA */ - task.tf.lbam = bcount & 0xff; - task.tf.lbah = (bcount >> 8) & 0xff; - - ide_tf_load(drive, &task); -} - -EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load); diff --git a/trunk/drivers/ide/ide-iops.c b/trunk/drivers/ide/ide-iops.c index c97c0719ddf1..bb9693dabe41 100644 --- a/trunk/drivers/ide/ide-iops.c +++ b/trunk/drivers/ide/ide-iops.c @@ -158,6 +158,14 @@ void default_hwif_mmiops (ide_hwif_t *hwif) EXPORT_SYMBOL(default_hwif_mmiops); +u32 ide_read_24 (ide_drive_t *drive) +{ + u8 hcyl = HWIF(drive)->INB(IDE_HCYL_REG); + u8 lcyl = HWIF(drive)->INB(IDE_LCYL_REG); + u8 sect = HWIF(drive)->INB(IDE_SECTOR_REG); + return (hcyl<<16)|(lcyl<<8)|sect; +} + void SELECT_DRIVE (ide_drive_t *drive) { if (HWIF(drive)->selectproc) @@ -167,12 +175,26 @@ void SELECT_DRIVE (ide_drive_t *drive) EXPORT_SYMBOL(SELECT_DRIVE); +void SELECT_INTERRUPT (ide_drive_t *drive) +{ + if (HWIF(drive)->intrproc) + HWIF(drive)->intrproc(drive); + else + HWIF(drive)->OUTB(drive->ctl|2, IDE_CONTROL_REG); +} + void SELECT_MASK (ide_drive_t *drive, int mask) { if (HWIF(drive)->maskproc) HWIF(drive)->maskproc(drive, mask); } +void QUIRK_LIST (ide_drive_t *drive) +{ + if (HWIF(drive)->quirkproc) + drive->quirk_list = HWIF(drive)->quirkproc(drive); +} + /* * Some localbus EIDE interfaces require a special access sequence * when using 32-bit I/O instructions to transfer data. We call this @@ -427,6 +449,7 @@ int drive_is_ready (ide_drive_t *drive) udelay(1); #endif +#ifdef CONFIG_IDEPCI_SHARE_IRQ /* * We do a passive status test under shared PCI interrupts on * cards that truly share the ATA side interrupt, but may also share @@ -436,6 +459,7 @@ int drive_is_ready (ide_drive_t *drive) if (IDE_CONTROL_REG) stat = hwif->INB(IDE_ALTSTATUS_REG); else +#endif /* CONFIG_IDEPCI_SHARE_IRQ */ /* Note: this may clear a pending IRQ!! 
*/ stat = hwif->INB(IDE_STATUS_REG); @@ -618,9 +642,9 @@ u8 eighty_ninty_three (ide_drive_t *drive) int ide_ata66_check (ide_drive_t *drive, ide_task_t *args) { - if (args->tf.command == WIN_SETFEATURES && - args->tf.lbal > XFER_UDMA_2 && - args->tf.feature == SETFEATURES_XFER) { + if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) && + (args->tfRegister[IDE_SECTOR_OFFSET] > XFER_UDMA_2) && + (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER)) { if (eighty_ninty_three(drive) == 0) { printk(KERN_WARNING "%s: UDMA speeds >UDMA33 cannot " "be set\n", drive->name); @@ -638,9 +662,9 @@ int ide_ata66_check (ide_drive_t *drive, ide_task_t *args) */ int set_transfer (ide_drive_t *drive, ide_task_t *args) { - if (args->tf.command == WIN_SETFEATURES && - args->tf.lbal >= XFER_SW_DMA_0 && - args->tf.feature == SETFEATURES_XFER && + if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) && + (args->tfRegister[IDE_SECTOR_OFFSET] >= XFER_SW_DMA_0) && + (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER) && (drive->id->dma_ultra || drive->id->dma_mword || drive->id->dma_1word)) @@ -878,9 +902,8 @@ EXPORT_SYMBOL(ide_set_handler); * handler and IRQ setup do not race. All IDE command kick off * should go via this function or do equivalent locking. */ - -void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler, - unsigned timeout, ide_expiry_t *expiry) + +void ide_execute_command(ide_drive_t *drive, task_ioreg_t cmd, ide_handler_t *handler, unsigned timeout, ide_expiry_t *expiry) { unsigned long flags; ide_hwgroup_t *hwgroup = HWGROUP(drive); @@ -1028,7 +1051,8 @@ static void ide_disk_pre_reset(ide_drive_t *drive) drive->special.all = 0; drive->special.b.set_geometry = legacy; drive->special.b.recalibrate = legacy; - drive->mult_count = 0; + if (OK_TO_RESET_CONTROLLER) + drive->mult_count = 0; if (!drive->keep_settings && !drive->using_dma) drive->mult_req = 0; if (drive->mult_req != drive->mult_count) @@ -1113,6 +1137,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) for (unit = 0; unit < MAX_DRIVES; ++unit) pre_reset(&hwif->drives[unit]); +#if OK_TO_RESET_CONTROLLER if (!IDE_CONTROL_REG) { spin_unlock_irqrestore(&ide_lock, flags); return ide_stopped; @@ -1149,8 +1174,11 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) * state when the disks are reset this way. At least, the Winbond * 553 documentation says that */ - if (hwif->resetproc) + if (hwif->resetproc != NULL) { hwif->resetproc(drive); + } + +#endif /* OK_TO_RESET_CONTROLLER */ spin_unlock_irqrestore(&ide_lock, flags); return ide_started; diff --git a/trunk/drivers/ide/ide-lib.c b/trunk/drivers/ide/ide-lib.c index a3bd8e8ed6b0..062d3bcb2471 100644 --- a/trunk/drivers/ide/ide-lib.c +++ b/trunk/drivers/ide/ide-lib.c @@ -441,12 +441,6 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) * case could happen iff the transfer mode has already been set on * the device by ide-proc.c::set_xfer_rate()). 
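 *
 * (Illustrative note, not from the original patch.) With the PIO
 * special case removed below, a call such as
 *
 *	ide_set_xfer_rate(drive, XFER_MW_DMA_2);
 *
 * now always ends in ide_set_dma_mode(drive, rate).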
*/ - if (rate < XFER_PIO_0) { - if (hwif->host_flags & IDE_HFLAG_ABUSE_SET_DMA_MODE) - return ide_set_dma_mode(drive, rate); - else - return ide_config_drive_speed(drive, rate); - } return ide_set_dma_mode(drive, rate); } @@ -464,7 +458,8 @@ static void ide_dump_opcode(ide_drive_t *drive) spin_unlock(&ide_lock); if (!rq) return; - if (rq->cmd_type == REQ_TYPE_ATA_CMD) { + if (rq->cmd_type == REQ_TYPE_ATA_CMD || + rq->cmd_type == REQ_TYPE_ATA_TASK) { char *args = rq->buffer; if (args) { opcode = args[0]; @@ -473,7 +468,8 @@ static void ide_dump_opcode(ide_drive_t *drive) } else if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { ide_task_t *args = rq->special; if (args) { - opcode = args->tf.command; + task_struct_t *tf = (task_struct_t *) args->tfRegister; + opcode = tf->command; found = 1; } } @@ -485,118 +481,141 @@ static void ide_dump_opcode(ide_drive_t *drive) printk("0x%02x\n", opcode); } -u64 ide_get_lba_addr(struct ide_taskfile *tf, int lba48) +static u8 ide_dump_ata_status(ide_drive_t *drive, const char *msg, u8 stat) { - u32 high, low; - - if (lba48) - high = (tf->hob_lbah << 16) | (tf->hob_lbam << 8) | - tf->hob_lbal; - else - high = tf->device & 0xf; - low = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal; - - return ((u64)high << 24) | low; -} -EXPORT_SYMBOL_GPL(ide_get_lba_addr); - -static void ide_dump_sector(ide_drive_t *drive) -{ - ide_task_t task; - struct ide_taskfile *tf = &task.tf; - int lba48 = (drive->addressing == 1) ? 1 : 0; - - memset(&task, 0, sizeof(task)); - if (lba48) - task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_HOB_LBA | - IDE_TFLAG_LBA48; - else - task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE; - - ide_tf_read(drive, &task); - - if (lba48 || (tf->device & ATA_LBA)) - printk(", LBAsect=%llu", - (unsigned long long)ide_get_lba_addr(tf, lba48)); - else - printk(", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam, - tf->device & 0xf, tf->lbal); -} + ide_hwif_t *hwif = HWIF(drive); + unsigned long flags; + u8 err = 0; -static void ide_dump_ata_error(ide_drive_t *drive, u8 err) -{ - printk("{ "); - if (err & ABRT_ERR) printk("DriveStatusError "); - if (err & ICRC_ERR) - printk((err & ABRT_ERR) ? 
"BadCRC " : "BadSector "); - if (err & ECC_ERR) printk("UncorrectableError "); - if (err & ID_ERR) printk("SectorIdNotFound "); - if (err & TRK0_ERR) printk("TrackZeroNotFound "); - if (err & MARK_ERR) printk("AddrMarkNotFound "); - printk("}"); - if ((err & (BBD_ERR | ABRT_ERR)) == BBD_ERR || - (err & (ECC_ERR|ID_ERR|MARK_ERR))) { - ide_dump_sector(drive); - if (HWGROUP(drive) && HWGROUP(drive)->rq) - printk(", sector=%llu", - (unsigned long long)HWGROUP(drive)->rq->sector); + local_irq_save(flags); + printk("%s: %s: status=0x%02x { ", drive->name, msg, stat); + if (stat & BUSY_STAT) + printk("Busy "); + else { + if (stat & READY_STAT) printk("DriveReady "); + if (stat & WRERR_STAT) printk("DeviceFault "); + if (stat & SEEK_STAT) printk("SeekComplete "); + if (stat & DRQ_STAT) printk("DataRequest "); + if (stat & ECC_STAT) printk("CorrectedError "); + if (stat & INDEX_STAT) printk("Index "); + if (stat & ERR_STAT) printk("Error "); } - printk("\n"); -} - -static void ide_dump_atapi_error(ide_drive_t *drive, u8 err) -{ - printk("{ "); - if (err & ILI_ERR) printk("IllegalLengthIndication "); - if (err & EOM_ERR) printk("EndOfMedia "); - if (err & ABRT_ERR) printk("AbortedCommand "); - if (err & MCR_ERR) printk("MediaChangeRequested "); - if (err & LFS_ERR) printk("LastFailedSense=0x%02x ", - (err & LFS_ERR) >> 4); printk("}\n"); + if ((stat & (BUSY_STAT|ERR_STAT)) == ERR_STAT) { + err = hwif->INB(IDE_ERROR_REG); + printk("%s: %s: error=0x%02x { ", drive->name, msg, err); + if (err & ABRT_ERR) printk("DriveStatusError "); + if (err & ICRC_ERR) + printk((err & ABRT_ERR) ? "BadCRC " : "BadSector "); + if (err & ECC_ERR) printk("UncorrectableError "); + if (err & ID_ERR) printk("SectorIdNotFound "); + if (err & TRK0_ERR) printk("TrackZeroNotFound "); + if (err & MARK_ERR) printk("AddrMarkNotFound "); + printk("}"); + if ((err & (BBD_ERR | ABRT_ERR)) == BBD_ERR || + (err & (ECC_ERR|ID_ERR|MARK_ERR))) { + if (drive->addressing == 1) { + __u64 sectors = 0; + u32 low = 0, high = 0; + hwif->OUTB(drive->ctl&~0x80, IDE_CONTROL_REG); + low = ide_read_24(drive); + hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG); + high = ide_read_24(drive); + sectors = ((__u64)high << 24) | low; + printk(", LBAsect=%llu, high=%d, low=%d", + (unsigned long long) sectors, + high, low); + } else { + u8 cur = hwif->INB(IDE_SELECT_REG); + if (cur & 0x40) { /* using LBA? */ + printk(", LBAsect=%ld", (unsigned long) + ((cur&0xf)<<24) + |(hwif->INB(IDE_HCYL_REG)<<16) + |(hwif->INB(IDE_LCYL_REG)<<8) + | hwif->INB(IDE_SECTOR_REG)); + } else { + printk(", CHS=%d/%d/%d", + (hwif->INB(IDE_HCYL_REG)<<8) + + hwif->INB(IDE_LCYL_REG), + cur & 0xf, + hwif->INB(IDE_SECTOR_REG)); + } + } + if (HWGROUP(drive) && HWGROUP(drive)->rq) + printk(", sector=%llu", + (unsigned long long)HWGROUP(drive)->rq->sector); + } + printk("\n"); + } + ide_dump_opcode(drive); + local_irq_restore(flags); + return err; } /** - * ide_dump_status - translate ATA/ATAPI error + * ide_dump_atapi_status - print human readable atapi status * @drive: drive that status applies to * @msg: text message to print * @stat: status byte to decode * * Error reporting, in human readable form (luxurious, but a memory hog). - * Combines the drive name, message and status byte to provide a - * user understandable explanation of the device error. 
*/ -u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat) +static u8 ide_dump_atapi_status(ide_drive_t *drive, const char *msg, u8 stat) { unsigned long flags; - u8 err = 0; + atapi_status_t status; + atapi_error_t error; + + status.all = stat; + error.all = 0; local_irq_save(flags); printk("%s: %s: status=0x%02x { ", drive->name, msg, stat); - if (stat & BUSY_STAT) + if (status.b.bsy) printk("Busy "); else { - if (stat & READY_STAT) printk("DriveReady "); - if (stat & WRERR_STAT) printk("DeviceFault "); - if (stat & SEEK_STAT) printk("SeekComplete "); - if (stat & DRQ_STAT) printk("DataRequest "); - if (stat & ECC_STAT) printk("CorrectedError "); - if (stat & INDEX_STAT) printk("Index "); - if (stat & ERR_STAT) printk("Error "); + if (status.b.drdy) printk("DriveReady "); + if (status.b.df) printk("DeviceFault "); + if (status.b.dsc) printk("SeekComplete "); + if (status.b.drq) printk("DataRequest "); + if (status.b.corr) printk("CorrectedError "); + if (status.b.idx) printk("Index "); + if (status.b.check) printk("Error "); } printk("}\n"); - if ((stat & (BUSY_STAT|ERR_STAT)) == ERR_STAT) { - err = drive->hwif->INB(IDE_ERROR_REG); - printk("%s: %s: error=0x%02x ", drive->name, msg, err); - if (drive->media == ide_disk) - ide_dump_ata_error(drive, err); - else - ide_dump_atapi_error(drive, err); + if (status.b.check && !status.b.bsy) { + error.all = HWIF(drive)->INB(IDE_ERROR_REG); + printk("%s: %s: error=0x%02x { ", drive->name, msg, error.all); + if (error.b.ili) printk("IllegalLengthIndication "); + if (error.b.eom) printk("EndOfMedia "); + if (error.b.abrt) printk("AbortedCommand "); + if (error.b.mcr) printk("MediaChangeRequested "); + if (error.b.sense_key) printk("LastFailedSense=0x%02x ", + error.b.sense_key); + printk("}\n"); } ide_dump_opcode(drive); local_irq_restore(flags); - return err; + return error.all; +} + +/** + * ide_dump_status - translate ATA/ATAPI error + * @drive: drive the error occured on + * @msg: information string + * @stat: status byte + * + * Error reporting, in human readable form (luxurious, but a memory hog). + * Combines the drive name, message and status byte to provide a + * user understandable explanation of the device error. + */ + +u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat) +{ + if (drive->media == ide_disk) + return ide_dump_ata_status(drive, msg, stat); + return ide_dump_atapi_status(drive, msg, stat); } EXPORT_SYMBOL(ide_dump_status); diff --git a/trunk/drivers/ide/ide-probe.c b/trunk/drivers/ide/ide-probe.c index 0379d1f697cf..0cb3d2bb3ab9 100644 --- a/trunk/drivers/ide/ide-probe.c +++ b/trunk/drivers/ide/ide-probe.c @@ -95,10 +95,10 @@ static void ide_disk_init_mult_count(ide_drive_t *drive) #ifdef CONFIG_IDEDISK_MULTI_MODE id->multsect = ((id->max_multsect/2) > 1) ? id->max_multsect : 0; id->multsect_valid = id->multsect ? 1 : 0; - drive->mult_req = id->multsect_valid ? id->max_multsect : 0; + drive->mult_req = id->multsect_valid ? id->max_multsect : INITIAL_MULT_COUNT; drive->special.b.set_multmode = drive->mult_req ? 1 : 0; #else /* original, pre IDE-NFG, per request of AC */ - drive->mult_req = 0; + drive->mult_req = INITIAL_MULT_COUNT; if (drive->mult_req > id->max_multsect) drive->mult_req = id->max_multsect; if (drive->mult_req || ((id->multsect_valid & 1) && id->multsect)) @@ -234,10 +234,7 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd) drive->media = ide_disk; printk("%s DISK drive\n", (id->config == 0x848a) ? 
"CFA" : "ATA" ); - - if (hwif->quirkproc) - drive->quirk_list = hwif->quirkproc(drive); - + QUIRK_LIST(drive); return; err_misc: @@ -833,8 +830,16 @@ static void probe_hwif(ide_hwif_t *hwif) drive->nice1 = 1; - if (hwif->ide_dma_on) + if (hwif->ide_dma_on) { + /* + * Force DMAing for the beginning of the check. + * Some chipsets appear to do interesting + * things, if not checked and cleared. + * PARANOIA!!! + */ + hwif->dma_off_quietly(drive); ide_set_dma(drive); + } } } @@ -963,6 +968,11 @@ static int ide_init_queue(ide_drive_t *drive) * Much of the code is for correctly detecting/handling irq sharing * and irq serialization situations. This is somewhat complex because * it handles static as well as dynamic (PCMCIA) IDE interfaces. + * + * The IRQF_DISABLED in sa_flags means ide_intr() is always entered with + * interrupts completely disabled. This can be bad for interrupt latency, + * but anything else has led to problems on some machines. We re-enable + * interrupts as much as we can safely do in most places. */ static int init_irq (ide_hwif_t *hwif) { @@ -1045,13 +1055,17 @@ static int init_irq (ide_hwif_t *hwif) * Allocate the irq, if not already obtained for another hwif */ if (!match || match->irq != hwif->irq) { - int sa = 0; + int sa = IRQF_DISABLED; #if defined(__mc68000__) || defined(CONFIG_APUS) sa = IRQF_SHARED; #endif /* __mc68000__ || CONFIG_APUS */ - if (IDE_CHIPSET_IS_PCI(hwif->chipset)) + if (IDE_CHIPSET_IS_PCI(hwif->chipset)) { sa = IRQF_SHARED; +#ifndef CONFIG_IDEPCI_SHARE_IRQ + sa |= IRQF_DISABLED; +#endif /* CONFIG_IDEPCI_SHARE_IRQ */ + } if (hwif->io_ports[IDE_CONTROL_OFFSET]) /* clear nIEN */ diff --git a/trunk/drivers/ide/ide-tape.c b/trunk/drivers/ide/ide-tape.c index 3cbca3f4628a..1495792d7917 100644 --- a/trunk/drivers/ide/ide-tape.c +++ b/trunk/drivers/ide/ide-tape.c @@ -614,6 +614,16 @@ typedef struct os_dat_s { /*************************** End of tunable parameters ***********************/ +/* + * Debugging/Performance analysis + * + * I/O trace support + */ +#define USE_IOTRACE 0 +#if USE_IOTRACE +#define IO_IDETAPE_FIFO 500 +#endif + /* * Read/Write error simulation */ @@ -1808,8 +1818,9 @@ static ide_startstop_t idetape_retry_pc (ide_drive_t *drive) idetape_tape_t *tape = drive->driver_data; idetape_pc_t *pc; struct request *rq; + atapi_error_t error; - (void)drive->hwif->INB(IDE_ERROR_REG); + error.all = HWIF(drive)->INB(IDE_ERROR_REG); pc = idetape_next_pc_storage(drive); rq = idetape_next_rq_storage(drive); idetape_create_request_sense_cmd(pc); @@ -1847,13 +1858,15 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; idetape_tape_t *tape = drive->driver_data; + atapi_status_t status; + atapi_bcount_t bcount; + atapi_ireason_t ireason; idetape_pc_t *pc = tape->pc; + unsigned int temp; #if SIMULATE_ERRORS static int error_sim_count = 0; #endif - u16 bcount; - u8 stat, ireason; #if IDETAPE_DEBUG_LOG if (tape->debug_level >= 4) @@ -1862,10 +1875,10 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive) #endif /* IDETAPE_DEBUG_LOG */ /* Clear the interrupt */ - stat = hwif->INB(IDE_STATUS_REG); + status.all = HWIF(drive)->INB(IDE_STATUS_REG); if (test_bit(PC_DMA_IN_PROGRESS, &pc->flags)) { - if (hwif->ide_dma_end(drive) || (stat & ERR_STAT)) { + if (HWIF(drive)->ide_dma_end(drive) || status.b.check) { /* * A DMA error is sometimes expected. 
For example, * if the tape is crossing a filemark during a @@ -1899,7 +1912,7 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive) } /* No more interrupts */ - if ((stat & DRQ_STAT) == 0) { + if (!status.b.drq) { #if IDETAPE_DEBUG_LOG if (tape->debug_level >= 2) printk(KERN_INFO "ide-tape: Packet command completed, %d bytes transferred\n", pc->actually_transferred); @@ -1914,13 +1927,12 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive) (++error_sim_count % 100) == 0) { printk(KERN_INFO "ide-tape: %s: simulating error\n", tape->name); - stat |= ERR_STAT; + status.b.check = 1; } #endif - if ((stat & ERR_STAT) && pc->c[0] == IDETAPE_REQUEST_SENSE_CMD) - stat &= ~ERR_STAT; - if ((stat & ERR_STAT) || test_bit(PC_DMA_ERROR, &pc->flags)) { - /* Error detected */ + if (status.b.check && pc->c[0] == IDETAPE_REQUEST_SENSE_CMD) + status.b.check = 0; + if (status.b.check || test_bit(PC_DMA_ERROR, &pc->flags)) { /* Error detected */ #if IDETAPE_DEBUG_LOG if (tape->debug_level >= 1) printk(KERN_INFO "ide-tape: %s: I/O error\n", @@ -1939,7 +1951,7 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive) } pc->error = 0; if (test_bit(PC_WAIT_FOR_DSC, &pc->flags) && - (stat & SEEK_STAT) == 0) { + !status.b.dsc) { /* Media access command */ tape->dsc_polling_start = jiffies; tape->dsc_polling_frequency = IDETAPE_DSC_MA_FAST; @@ -1961,30 +1973,30 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive) return ide_do_reset(drive); } /* Get the number of bytes to transfer on this interrupt. */ - bcount = (hwif->INB(IDE_BCOUNTH_REG) << 8) | - hwif->INB(IDE_BCOUNTL_REG); + bcount.b.high = hwif->INB(IDE_BCOUNTH_REG); + bcount.b.low = hwif->INB(IDE_BCOUNTL_REG); - ireason = hwif->INB(IDE_IREASON_REG); + ireason.all = hwif->INB(IDE_IREASON_REG); - if (ireason & CD) { + if (ireason.b.cod) { printk(KERN_ERR "ide-tape: CoD != 0 in idetape_pc_intr\n"); return ide_do_reset(drive); } - if (((ireason & IO) == IO) == test_bit(PC_WRITING, &pc->flags)) { + if (ireason.b.io == test_bit(PC_WRITING, &pc->flags)) { /* Hopefully, we will never get here */ printk(KERN_ERR "ide-tape: We wanted to %s, ", - (ireason & IO) ? "Write" : "Read"); + ireason.b.io ? "Write":"Read"); printk(KERN_ERR "ide-tape: but the tape wants us to %s !\n", - (ireason & IO) ? "Read" : "Write"); + ireason.b.io ? 
"Read":"Write"); return ide_do_reset(drive); } if (!test_bit(PC_WRITING, &pc->flags)) { /* Reading - Check that we have enough space */ - temp = pc->actually_transferred + bcount; + temp = pc->actually_transferred + bcount.all; if (temp > pc->request_transfer) { if (temp > pc->buffer_size) { printk(KERN_ERR "ide-tape: The tape wants to send us more data than expected - discarding data\n"); - idetape_discard_data(drive, bcount); + idetape_discard_data(drive, bcount.all); ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL); return ide_started; } @@ -1996,26 +2008,23 @@ static ide_startstop_t idetape_pc_intr (ide_drive_t *drive) } if (test_bit(PC_WRITING, &pc->flags)) { if (pc->bh != NULL) - idetape_output_buffers(drive, pc, bcount); + idetape_output_buffers(drive, pc, bcount.all); else /* Write the current buffer */ - hwif->atapi_output_bytes(drive, pc->current_position, - bcount); + HWIF(drive)->atapi_output_bytes(drive, pc->current_position, bcount.all); } else { if (pc->bh != NULL) - idetape_input_buffers(drive, pc, bcount); + idetape_input_buffers(drive, pc, bcount.all); else /* Read the current buffer */ - hwif->atapi_input_bytes(drive, pc->current_position, - bcount); + HWIF(drive)->atapi_input_bytes(drive, pc->current_position, bcount.all); } /* Update the current position */ - pc->actually_transferred += bcount; - pc->current_position += bcount; + pc->actually_transferred += bcount.all; + pc->current_position += bcount.all; #if IDETAPE_DEBUG_LOG if (tape->debug_level >= 2) - printk(KERN_INFO "ide-tape: [cmd %x] transferred %d bytes " - "on that interrupt\n", pc->c[0], bcount); + printk(KERN_INFO "ide-tape: [cmd %x] transferred %d bytes on that interrupt\n", pc->c[0], bcount.all); #endif /* And set the interrupt handler again */ ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL); @@ -2069,28 +2078,28 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive) ide_hwif_t *hwif = drive->hwif; idetape_tape_t *tape = drive->driver_data; idetape_pc_t *pc = tape->pc; + atapi_ireason_t ireason; int retries = 100; ide_startstop_t startstop; - u8 ireason; if (ide_wait_stat(&startstop,drive,DRQ_STAT,BUSY_STAT,WAIT_READY)) { printk(KERN_ERR "ide-tape: Strange, packet command initiated yet DRQ isn't asserted\n"); return startstop; } - ireason = hwif->INB(IDE_IREASON_REG); - while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) { + ireason.all = hwif->INB(IDE_IREASON_REG); + while (retries-- && (!ireason.b.cod || ireason.b.io)) { printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing " "a packet command, retrying\n"); udelay(100); - ireason = hwif->INB(IDE_IREASON_REG); + ireason.all = hwif->INB(IDE_IREASON_REG); if (retries == 0) { printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while " "issuing a packet command, ignoring\n"); - ireason |= CD; - ireason &= ~IO; + ireason.b.cod = 1; + ireason.b.io = 0; } } - if ((ireason & CD) == 0 || (ireason & IO)) { + if (!ireason.b.cod || ireason.b.io) { printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing " "a packet command\n"); return ide_do_reset(drive); @@ -2111,8 +2120,8 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape { ide_hwif_t *hwif = drive->hwif; idetape_tape_t *tape = drive->driver_data; + atapi_bcount_t bcount; int dma_ok = 0; - u16 bcount; #if IDETAPE_DEBUG_BUGS if (tape->pc->c[0] == IDETAPE_REQUEST_SENSE_CMD && @@ -2161,7 +2170,7 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape pc->actually_transferred = 0; pc->current_position = 
pc->buffer; /* Request to transfer the entire buffer at once */ - bcount = pc->request_transfer; + bcount.all = pc->request_transfer; if (test_and_clear_bit(PC_DMA_ERROR, &pc->flags)) { printk(KERN_WARNING "ide-tape: DMA disabled, " @@ -2171,9 +2180,12 @@ static ide_startstop_t idetape_issue_packet_command (ide_drive_t *drive, idetape if (test_bit(PC_DMA_RECOMMENDED, &pc->flags) && drive->using_dma) dma_ok = !hwif->dma_setup(drive); - ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK | - IDE_TFLAG_OUT_DEVICE, bcount, dma_ok); - + if (IDE_CONTROL_REG) + hwif->OUTB(drive->ctl, IDE_CONTROL_REG); + hwif->OUTB(dma_ok ? 1 : 0, IDE_FEATURE_REG); /* Use PIO/DMA */ + hwif->OUTB(bcount.b.high, IDE_BCOUNTH_REG); + hwif->OUTB(bcount.b.low, IDE_BCOUNTL_REG); + hwif->OUTB(drive->select.all, IDE_SELECT_REG); if (dma_ok) /* Will begin DMA later */ set_bit(PC_DMA_IN_PROGRESS, &pc->flags); if (test_bit(IDETAPE_DRQ_INTERRUPT, &tape->flags)) { @@ -2283,11 +2295,11 @@ static ide_startstop_t idetape_media_access_finished (ide_drive_t *drive) { idetape_tape_t *tape = drive->driver_data; idetape_pc_t *pc = tape->pc; - u8 stat; + atapi_status_t status; - stat = drive->hwif->INB(IDE_STATUS_REG); - if (stat & SEEK_STAT) { - if (stat & ERR_STAT) { + status.all = HWIF(drive)->INB(IDE_STATUS_REG); + if (status.b.dsc) { + if (status.b.check) { /* Error detected */ if (pc->c[0] != IDETAPE_TEST_UNIT_READY_CMD) printk(KERN_ERR "ide-tape: %s: I/O error, ", @@ -2405,7 +2417,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, idetape_tape_t *tape = drive->driver_data; idetape_pc_t *pc = NULL; struct request *postponed_rq = tape->postponed_rq; - u8 stat; + atapi_status_t status; #if IDETAPE_DEBUG_LOG #if 0 @@ -2453,7 +2465,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, * If the tape is still busy, postpone our request and service * the other device meanwhile. 
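 *
 * (Rough sketch, not from the original patch.) The check that follows
 * boils down to:
 *
 *	status.all = HWIF(drive)->INB(IDE_STATUS_REG);
 *	if (!status.b.dsc)
 *		postpone the request;	// and poll again at
 *					// dsc_polling_frequency
 *
 * i.e. the DSC bit decides whether the queued packet command is
 * issued now or retried later.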
*/ - stat = drive->hwif->INB(IDE_STATUS_REG); + status.all = HWIF(drive)->INB(IDE_STATUS_REG); if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2)) set_bit(IDETAPE_IGNORE_DSC, &tape->flags); @@ -2469,7 +2481,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, tape->insert_speed = tape->insert_size / 1024 * HZ / (jiffies - tape->insert_time); calculate_speeds(drive); if (!test_and_clear_bit(IDETAPE_IGNORE_DSC, &tape->flags) && - (stat & SEEK_STAT) == 0) { + !status.b.dsc) { if (postponed_rq == NULL) { tape->dsc_polling_start = jiffies; tape->dsc_polling_frequency = tape->best_dsc_rw_frequency; @@ -2490,6 +2502,9 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, } if (rq->cmd[0] & REQ_IDETAPE_READ) { tape->buffer_head++; +#if USE_IOTRACE + IO_trace(IO_IDETAPE_FIFO, tape->pipeline_head, tape->buffer_head, tape->tape_head, tape->minor); +#endif tape->postpone_cnt = 0; pc = idetape_next_pc_storage(drive); idetape_create_read_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special); @@ -2497,6 +2512,9 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, } if (rq->cmd[0] & REQ_IDETAPE_WRITE) { tape->buffer_head++; +#if USE_IOTRACE + IO_trace(IO_IDETAPE_FIFO, tape->pipeline_head, tape->buffer_head, tape->tape_head, tape->minor); +#endif tape->postpone_cnt = 0; pc = idetape_next_pc_storage(drive); idetape_create_write_cmd(tape, pc, rq->current_nr_sectors, (struct idetape_bh *)rq->special); @@ -3223,6 +3241,9 @@ static int idetape_add_chrdev_write_request (ide_drive_t *drive, int blocks) idetape_switch_buffers(tape, new_stage); idetape_add_stage_tail(drive, new_stage); tape->pipeline_head++; +#if USE_IOTRACE + IO_trace(IO_IDETAPE_FIFO, tape->pipeline_head, tape->buffer_head, tape->tape_head, tape->minor); +#endif calculate_speeds(drive); /* @@ -3472,6 +3493,9 @@ static int idetape_add_chrdev_read_request (ide_drive_t *drive,int blocks) idetape_remove_stage_head(drive); spin_unlock_irqrestore(&tape->spinlock, flags); tape->pipeline_head++; +#if USE_IOTRACE + IO_trace(IO_IDETAPE_FIFO, tape->pipeline_head, tape->buffer_head, tape->tape_head, tape->minor); +#endif calculate_speeds(drive); } #if IDETAPE_DEBUG_BUGS diff --git a/trunk/drivers/ide/ide-taskfile.c b/trunk/drivers/ide/ide-taskfile.c index 2d63ea9ee61b..2b60f1b0437e 100644 --- a/trunk/drivers/ide/ide-taskfile.c +++ b/trunk/drivers/ide/ide-taskfile.c @@ -63,78 +63,65 @@ static void taskfile_output_data(ide_drive_t *drive, void *buffer, u32 wcount) } } -void ide_tf_load(ide_drive_t *drive, ide_task_t *task) -{ - ide_hwif_t *hwif = drive->hwif; - struct ide_taskfile *tf = &task->tf; - u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 
0xE0 : 0xEF; - - if (task->tf_flags & IDE_TFLAG_FLAGGED) - HIHI = 0xFF; - -#ifdef DEBUG - printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x " - "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n", - drive->name, tf->feature, tf->nsect, tf->lbal, - tf->lbam, tf->lbah, tf->device, tf->command); -#endif - - if (IDE_CONTROL_REG) - hwif->OUTB(drive->ctl, IDE_CONTROL_REG); /* clear nIEN */ - - if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0) - SELECT_MASK(drive, 0); - - if (task->tf_flags & IDE_TFLAG_OUT_DATA) - hwif->OUTW((tf->hob_data << 8) | tf->data, IDE_DATA_REG); - - if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) - hwif->OUTB(tf->hob_feature, IDE_FEATURE_REG); - if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) - hwif->OUTB(tf->hob_nsect, IDE_NSECTOR_REG); - if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) - hwif->OUTB(tf->hob_lbal, IDE_SECTOR_REG); - if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) - hwif->OUTB(tf->hob_lbam, IDE_LCYL_REG); - if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) - hwif->OUTB(tf->hob_lbah, IDE_HCYL_REG); - - if (task->tf_flags & IDE_TFLAG_OUT_FEATURE) - hwif->OUTB(tf->feature, IDE_FEATURE_REG); - if (task->tf_flags & IDE_TFLAG_OUT_NSECT) - hwif->OUTB(tf->nsect, IDE_NSECTOR_REG); - if (task->tf_flags & IDE_TFLAG_OUT_LBAL) - hwif->OUTB(tf->lbal, IDE_SECTOR_REG); - if (task->tf_flags & IDE_TFLAG_OUT_LBAM) - hwif->OUTB(tf->lbam, IDE_LCYL_REG); - if (task->tf_flags & IDE_TFLAG_OUT_LBAH) - hwif->OUTB(tf->lbah, IDE_HCYL_REG); - - if (task->tf_flags & IDE_TFLAG_OUT_DEVICE) - hwif->OUTB((tf->device & HIHI) | drive->select.all, IDE_SELECT_REG); -} - int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf) { ide_task_t args; - memset(&args, 0, sizeof(ide_task_t)); - args.tf.nsect = 0x01; + args.tfRegister[IDE_NSECTOR_OFFSET] = 0x01; if (drive->media == ide_disk) - args.tf.command = WIN_IDENTIFY; + args.tfRegister[IDE_COMMAND_OFFSET] = WIN_IDENTIFY; else - args.tf.command = WIN_PIDENTIFY; - args.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; - args.data_phase = TASKFILE_IN; - return ide_raw_taskfile(drive, &args, buf, 1); + args.tfRegister[IDE_COMMAND_OFFSET] = WIN_PIDENTIFY; + args.command_type = IDE_DRIVE_TASK_IN; + args.data_phase = TASKFILE_IN; + args.handler = &task_in_intr; + return ide_raw_taskfile(drive, &args, buf); } -static int inline task_dma_ok(ide_task_t *task) +ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task) { - if (blk_fs_request(task->rq) || (task->tf_flags & IDE_TFLAG_FLAGGED)) - return 1; + ide_hwif_t *hwif = HWIF(drive); + task_struct_t *taskfile = (task_struct_t *) task->tfRegister; + hob_struct_t *hobfile = (hob_struct_t *) task->hobRegister; + u8 HIHI = (drive->addressing == 1) ? 
0xE0 : 0xEF; + + /* ALL Command Block Executions SHALL clear nIEN, unless otherwise */ + if (IDE_CONTROL_REG) { + /* clear nIEN */ + hwif->OUTB(drive->ctl, IDE_CONTROL_REG); + } + SELECT_MASK(drive, 0); + + if (drive->addressing == 1) { + hwif->OUTB(hobfile->feature, IDE_FEATURE_REG); + hwif->OUTB(hobfile->sector_count, IDE_NSECTOR_REG); + hwif->OUTB(hobfile->sector_number, IDE_SECTOR_REG); + hwif->OUTB(hobfile->low_cylinder, IDE_LCYL_REG); + hwif->OUTB(hobfile->high_cylinder, IDE_HCYL_REG); + } + + hwif->OUTB(taskfile->feature, IDE_FEATURE_REG); + hwif->OUTB(taskfile->sector_count, IDE_NSECTOR_REG); + hwif->OUTB(taskfile->sector_number, IDE_SECTOR_REG); + hwif->OUTB(taskfile->low_cylinder, IDE_LCYL_REG); + hwif->OUTB(taskfile->high_cylinder, IDE_HCYL_REG); + + hwif->OUTB((taskfile->device_head & HIHI) | drive->select.all, IDE_SELECT_REG); + + if (task->handler != NULL) { + if (task->prehandler != NULL) { + hwif->OUTBSYNC(drive, taskfile->command, IDE_COMMAND_REG); + ndelay(400); /* FIXME */ + return task->prehandler(drive, task->rq); + } + ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL); + return ide_started; + } + + if (!drive->using_dma) + return ide_stopped; - switch (task->tf.command) { + switch (taskfile->command) { case WIN_WRITEDMA_ONCE: case WIN_WRITEDMA: case WIN_WRITEDMA_EXT: @@ -142,79 +129,24 @@ static int inline task_dma_ok(ide_task_t *task) case WIN_READDMA: case WIN_READDMA_EXT: case WIN_IDENTIFY_DMA: - return 1; - } - - return 0; -} - -static ide_startstop_t task_no_data_intr(ide_drive_t *); -static ide_startstop_t set_geometry_intr(ide_drive_t *); -static ide_startstop_t recal_intr(ide_drive_t *); -static ide_startstop_t set_multmode_intr(ide_drive_t *); -static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *); -static ide_startstop_t task_in_intr(ide_drive_t *); - -ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task) -{ - ide_hwif_t *hwif = HWIF(drive); - struct ide_taskfile *tf = &task->tf; - ide_handler_t *handler = NULL; - - if (task->data_phase == TASKFILE_MULTI_IN || - task->data_phase == TASKFILE_MULTI_OUT) { - if (!drive->mult_count) { - printk(KERN_ERR "%s: multimode not set!\n", - drive->name); - return ide_stopped; - } - } - - if (task->tf_flags & IDE_TFLAG_FLAGGED) - task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS; - - if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) - ide_tf_load(drive, task); - - switch (task->data_phase) { - case TASKFILE_MULTI_OUT: - case TASKFILE_OUT: - hwif->OUTBSYNC(drive, tf->command, IDE_COMMAND_REG); - ndelay(400); /* FIXME */ - return pre_task_out_intr(drive, task->rq); - case TASKFILE_MULTI_IN: - case TASKFILE_IN: - handler = task_in_intr; - /* fall-through */ - case TASKFILE_NO_DATA: - if (handler == NULL) - handler = task_no_data_intr; - /* WIN_{SPECIFY,RESTORE,SETMULT} use custom handlers */ - if (task->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) { - switch (tf->command) { - case WIN_SPECIFY: handler = set_geometry_intr; break; - case WIN_RESTORE: handler = recal_intr; break; - case WIN_SETMULT: handler = set_multmode_intr; break; + if (!hwif->dma_setup(drive)) { + hwif->dma_exec_cmd(drive, taskfile->command); + hwif->dma_start(drive); + return ide_started; } - } - ide_execute_command(drive, tf->command, handler, - WAIT_WORSTCASE, NULL); - return ide_started; - default: - if (task_dma_ok(task) == 0 || drive->using_dma == 0 || - hwif->dma_setup(drive)) - return ide_stopped; - hwif->dma_exec_cmd(drive, tf->command); - hwif->dma_start(drive); - return 
ide_started; + break; + default: + if (task->handler == NULL) + return ide_stopped; } + + return ide_stopped; } -EXPORT_SYMBOL_GPL(do_rw_taskfile); /* * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd. */ -static ide_startstop_t set_multmode_intr(ide_drive_t *drive) +ide_startstop_t set_multmode_intr (ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); u8 stat; @@ -232,7 +164,7 @@ static ide_startstop_t set_multmode_intr(ide_drive_t *drive) /* * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd. */ -static ide_startstop_t set_geometry_intr(ide_drive_t *drive) +ide_startstop_t set_geometry_intr (ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); int retries = 5; @@ -255,7 +187,7 @@ static ide_startstop_t set_geometry_intr(ide_drive_t *drive) /* * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd. */ -static ide_startstop_t recal_intr(ide_drive_t *drive) +ide_startstop_t recal_intr (ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); u8 stat; @@ -268,7 +200,7 @@ static ide_startstop_t recal_intr(ide_drive_t *drive) /* * Handler for commands without a data phase */ -static ide_startstop_t task_no_data_intr(ide_drive_t *drive) +ide_startstop_t task_no_data_intr (ide_drive_t *drive) { ide_task_t *args = HWGROUP(drive)->rq->special; ide_hwif_t *hwif = HWIF(drive); @@ -285,6 +217,8 @@ static ide_startstop_t task_no_data_intr(ide_drive_t *drive) return ide_stopped; } +EXPORT_SYMBOL(task_no_data_intr); + static u8 wait_drive_not_busy(ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); @@ -429,7 +363,7 @@ static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat) if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { ide_task_t *task = rq->special; - if (task->tf_flags & IDE_TFLAG_FLAGGED) { + if (task->tf_out_flags.all) { u8 err = drive->hwif->INB(IDE_ERROR_REG); ide_end_drive_cmd(drive, stat, err); return; @@ -448,7 +382,7 @@ static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat) /* * Handler for command with PIO data-in phase (Read/Read Multiple). */ -static ide_startstop_t task_in_intr(ide_drive_t *drive) +ide_startstop_t task_in_intr (ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; struct request *rq = HWGROUP(drive)->rq; @@ -479,6 +413,7 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive) return ide_started; } +EXPORT_SYMBOL(task_in_intr); /* * Handler for command with PIO data-out phase (Write/Write Multiple). @@ -508,7 +443,7 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive) return ide_started; } -static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq) +ide_startstop_t pre_task_out_intr (ide_drive_t *drive, struct request *rq) { ide_startstop_t startstop; @@ -529,8 +464,9 @@ static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq) return ide_started; } +EXPORT_SYMBOL(pre_task_out_intr); -int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect) +static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long data_size, u8 *buf) { struct request rq; @@ -545,27 +481,36 @@ int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect) * if we would find a solution to transfer any size. * To support special commands like READ LONG. 
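 *
 * (Illustrative example, not from the original patch.) When no
 * explicit data_size is given, the sector count is rebuilt from the
 * taskfile, e.g.
 *
 *	hobRegister[IDE_NSECTOR_OFFSET] = 0x00;
 *	tfRegister[IDE_NSECTOR_OFFSET]  = 0x01;
 *	rq.nr_sectors = (0x00 << 8) | 0x01;	// 1 sector, 512 bytes
 *
 * otherwise it is simply data_size / SECTOR_SIZE.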
*/ - rq.hard_nr_sectors = rq.nr_sectors = nsect; - rq.hard_cur_sectors = rq.current_nr_sectors = nsect; + if (args->command_type != IDE_DRIVE_TASK_NO_DATA) { + if (data_size == 0) + rq.nr_sectors = (args->hobRegister[IDE_NSECTOR_OFFSET] << 8) | args->tfRegister[IDE_NSECTOR_OFFSET]; + else + rq.nr_sectors = data_size / SECTOR_SIZE; + + if (!rq.nr_sectors) { + printk(KERN_ERR "%s: in/out command without data\n", + drive->name); + return -EFAULT; + } - if (task->tf_flags & IDE_TFLAG_WRITE) - rq.cmd_flags |= REQ_RW; + rq.hard_nr_sectors = rq.nr_sectors; + rq.hard_cur_sectors = rq.current_nr_sectors = rq.nr_sectors; - rq.special = task; - task->rq = &rq; + if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE) + rq.cmd_flags |= REQ_RW; + } + rq.special = args; + args->rq = &rq; return ide_do_drive_cmd(drive, &rq, ide_wait); } -EXPORT_SYMBOL(ide_raw_taskfile); - -int ide_no_data_taskfile(ide_drive_t *drive, ide_task_t *task) +int ide_raw_taskfile (ide_drive_t *drive, ide_task_t *args, u8 *buf) { - task->data_phase = TASKFILE_NO_DATA; - - return ide_raw_taskfile(drive, task, NULL, 0); + return ide_diag_taskfile(drive, args, 0, buf); } -EXPORT_SYMBOL_GPL(ide_no_data_taskfile); + +EXPORT_SYMBOL(ide_raw_taskfile); #ifdef CONFIG_IDE_TASK_IOCTL int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) @@ -574,12 +519,12 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) ide_task_t args; u8 *outbuf = NULL; u8 *inbuf = NULL; - u8 *data_buf = NULL; + task_ioreg_t *argsptr = args.tfRegister; + task_ioreg_t *hobsptr = args.hobRegister; int err = 0; int tasksize = sizeof(struct ide_task_request_s); unsigned int taskin = 0; unsigned int taskout = 0; - u16 nsect = 0; u8 io_32bit = drive->io_32bit; char __user *buf = (char __user *)arg; @@ -627,52 +572,24 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) } memset(&args, 0, sizeof(ide_task_t)); + memcpy(argsptr, req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE); + memcpy(hobsptr, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE); - memcpy(&args.tf_array[0], req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2); - memcpy(&args.tf_array[6], req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE); - - args.data_phase = req_task->data_phase; - - args.tf_flags = IDE_TFLAG_OUT_DEVICE; - if (drive->addressing == 1) - args.tf_flags |= IDE_TFLAG_LBA48; - - if (req_task->out_flags.all) { - args.tf_flags |= IDE_TFLAG_FLAGGED; - - if (req_task->out_flags.b.data) - args.tf_flags |= IDE_TFLAG_OUT_DATA; - - if (req_task->out_flags.b.nsector_hob) - args.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT; - if (req_task->out_flags.b.sector_hob) - args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL; - if (req_task->out_flags.b.lcyl_hob) - args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM; - if (req_task->out_flags.b.hcyl_hob) - args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH; - - if (req_task->out_flags.b.error_feature) - args.tf_flags |= IDE_TFLAG_OUT_FEATURE; - if (req_task->out_flags.b.nsector) - args.tf_flags |= IDE_TFLAG_OUT_NSECT; - if (req_task->out_flags.b.sector) - args.tf_flags |= IDE_TFLAG_OUT_LBAL; - if (req_task->out_flags.b.lcyl) - args.tf_flags |= IDE_TFLAG_OUT_LBAM; - if (req_task->out_flags.b.hcyl) - args.tf_flags |= IDE_TFLAG_OUT_LBAH; - } else { - args.tf_flags |= IDE_TFLAG_OUT_TF; - if (args.tf_flags & IDE_TFLAG_LBA48) - args.tf_flags |= IDE_TFLAG_OUT_HOB; - } - - if (req_task->in_flags.b.data) - args.tf_flags |= IDE_TFLAG_IN_DATA; + args.tf_in_flags = req_task->in_flags; + args.tf_out_flags = req_task->out_flags; + args.data_phase = 
req_task->data_phase; + args.command_type = req_task->req_cmd; drive->io_32bit = 0; switch(req_task->data_phase) { + case TASKFILE_OUT_DMAQ: + case TASKFILE_OUT_DMA: + err = ide_diag_taskfile(drive, &args, taskout, outbuf); + break; + case TASKFILE_IN_DMAQ: + case TASKFILE_IN_DMA: + err = ide_diag_taskfile(drive, &args, taskin, inbuf); + break; case TASKFILE_MULTI_OUT: if (!drive->mult_count) { /* (hs): give up if multcount is not set */ @@ -684,11 +601,9 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) } /* fall through */ case TASKFILE_OUT: - /* fall through */ - case TASKFILE_OUT_DMAQ: - case TASKFILE_OUT_DMA: - nsect = taskout / SECTOR_SIZE; - data_buf = outbuf; + args.prehandler = &pre_task_out_intr; + args.handler = &task_out_intr; + err = ide_diag_taskfile(drive, &args, taskout, outbuf); break; case TASKFILE_MULTI_IN: if (!drive->mult_count) { @@ -701,46 +616,22 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) } /* fall through */ case TASKFILE_IN: - /* fall through */ - case TASKFILE_IN_DMAQ: - case TASKFILE_IN_DMA: - nsect = taskin / SECTOR_SIZE; - data_buf = inbuf; + args.handler = &task_in_intr; + err = ide_diag_taskfile(drive, &args, taskin, inbuf); break; case TASKFILE_NO_DATA: + args.handler = &task_no_data_intr; + err = ide_diag_taskfile(drive, &args, 0, NULL); break; default: err = -EFAULT; goto abort; } - if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA) - nsect = 0; - else if (!nsect) { - nsect = (args.tf.hob_nsect << 8) | args.tf.nsect; - - if (!nsect) { - printk(KERN_ERR "%s: in/out command without data\n", - drive->name); - err = -EFAULT; - goto abort; - } - } - - if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE) - args.tf_flags |= IDE_TFLAG_WRITE; - - err = ide_raw_taskfile(drive, &args, data_buf, nsect); - - memcpy(req_task->hob_ports, &args.tf_array[0], HDIO_DRIVE_HOB_HDR_SIZE - 2); - memcpy(req_task->io_ports, &args.tf_array[6], HDIO_DRIVE_TASK_HDR_SIZE); - - if ((args.tf_flags & IDE_TFLAG_FLAGGED_SET_IN_FLAGS) && - req_task->in_flags.all == 0) { - req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS; - if (drive->addressing == 1) - req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8); - } + memcpy(req_task->io_ports, &(args.tfRegister), HDIO_DRIVE_TASK_HDR_SIZE); + memcpy(req_task->hob_ports, &(args.hobRegister), HDIO_DRIVE_HOB_HDR_SIZE); + req_task->in_flags = args.tf_in_flags; + req_task->out_flags = args.tf_out_flags; if (copy_to_user(buf, req_task, tasksize)) { err = -EFAULT; @@ -797,7 +688,6 @@ int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) u8 xfer_rate = 0; int argsize = 4; ide_task_t tfargs; - struct ide_taskfile *tf = &tfargs.tf; if (NULL == (void *) arg) { struct request rq; @@ -809,10 +699,13 @@ int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) return -EFAULT; memset(&tfargs, 0, sizeof(ide_task_t)); - tf->feature = args[2]; - tf->nsect = args[3]; - tf->lbal = args[1]; - tf->command = args[0]; + tfargs.tfRegister[IDE_FEATURE_OFFSET] = args[2]; + tfargs.tfRegister[IDE_NSECTOR_OFFSET] = args[3]; + tfargs.tfRegister[IDE_SECTOR_OFFSET] = args[1]; + tfargs.tfRegister[IDE_LCYL_OFFSET] = 0x00; + tfargs.tfRegister[IDE_HCYL_OFFSET] = 0x00; + tfargs.tfRegister[IDE_SELECT_OFFSET] = 0x00; + tfargs.tfRegister[IDE_COMMAND_OFFSET] = args[0]; if (args[3]) { argsize = 4 + (SECTOR_WORDS * 4 * args[3]); @@ -841,28 +734,135 @@ int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) return err; } +static int 
ide_wait_cmd_task(ide_drive_t *drive, u8 *buf) +{ + struct request rq; + + ide_init_drive_cmd(&rq); + rq.cmd_type = REQ_TYPE_ATA_TASK; + rq.buffer = buf; + return ide_do_drive_cmd(drive, &rq, ide_wait); +} + int ide_task_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg) { void __user *p = (void __user *)arg; int err = 0; - u8 args[7]; - ide_task_t task; + u8 args[7], *argbuf = args; + int argsize = 7; if (copy_from_user(args, p, 7)) return -EFAULT; + err = ide_wait_cmd_task(drive, argbuf); + if (copy_to_user(p, argbuf, argsize)) + err = -EFAULT; + return err; +} - memset(&task, 0, sizeof(task)); - memcpy(&task.tf_array[7], &args[1], 6); - task.tf.command = args[0]; - task.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE; +/* + * NOTICE: This is additions from IBM to provide a discrete interface, + * for selective taskregister access operations. Nice JOB Klaus!!! + * Glad to be able to work and co-develop this with you and IBM. + */ +ide_startstop_t flagged_taskfile (ide_drive_t *drive, ide_task_t *task) +{ + ide_hwif_t *hwif = HWIF(drive); + task_struct_t *taskfile = (task_struct_t *) task->tfRegister; + hob_struct_t *hobfile = (hob_struct_t *) task->hobRegister; - err = ide_no_data_taskfile(drive, &task); + if (task->data_phase == TASKFILE_MULTI_IN || + task->data_phase == TASKFILE_MULTI_OUT) { + if (!drive->mult_count) { + printk(KERN_ERR "%s: multimode not set!\n", drive->name); + return ide_stopped; + } + } - args[0] = task.tf.command; - memcpy(&args[1], &task.tf_array[7], 6); + /* + * (ks) Check taskfile in flags. + * If set, then execute as it is defined. + * If not set, then define default settings. + * The default values are: + * read all taskfile registers (except data) + * read the hob registers (sector, nsector, lcyl, hcyl) + */ + if (task->tf_in_flags.all == 0) { + task->tf_in_flags.all = IDE_TASKFILE_STD_IN_FLAGS; + if (drive->addressing == 1) + task->tf_in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8); + } - if (copy_to_user(p, args, 7)) - err = -EFAULT; + /* ALL Command Block Executions SHALL clear nIEN, unless otherwise */ + if (IDE_CONTROL_REG) + /* clear nIEN */ + hwif->OUTB(drive->ctl, IDE_CONTROL_REG); + SELECT_MASK(drive, 0); + + if (task->tf_out_flags.b.data) { + u16 data = taskfile->data + (hobfile->data << 8); + hwif->OUTW(data, IDE_DATA_REG); + } + + /* (ks) send hob registers first */ + if (task->tf_out_flags.b.nsector_hob) + hwif->OUTB(hobfile->sector_count, IDE_NSECTOR_REG); + if (task->tf_out_flags.b.sector_hob) + hwif->OUTB(hobfile->sector_number, IDE_SECTOR_REG); + if (task->tf_out_flags.b.lcyl_hob) + hwif->OUTB(hobfile->low_cylinder, IDE_LCYL_REG); + if (task->tf_out_flags.b.hcyl_hob) + hwif->OUTB(hobfile->high_cylinder, IDE_HCYL_REG); + + /* (ks) Send now the standard registers */ + if (task->tf_out_flags.b.error_feature) + hwif->OUTB(taskfile->feature, IDE_FEATURE_REG); + /* refers to number of sectors to transfer */ + if (task->tf_out_flags.b.nsector) + hwif->OUTB(taskfile->sector_count, IDE_NSECTOR_REG); + /* refers to sector offset or start sector */ + if (task->tf_out_flags.b.sector) + hwif->OUTB(taskfile->sector_number, IDE_SECTOR_REG); + if (task->tf_out_flags.b.lcyl) + hwif->OUTB(taskfile->low_cylinder, IDE_LCYL_REG); + if (task->tf_out_flags.b.hcyl) + hwif->OUTB(taskfile->high_cylinder, IDE_HCYL_REG); + + /* + * (ks) In the flagged taskfile approch, we will use all specified + * registers and the register value will not be changed, except the + * select bit (master/slave) in the drive_head register. 
We must make + * sure that the desired drive is selected. + */ + hwif->OUTB(taskfile->device_head | drive->select.all, IDE_SELECT_REG); + switch(task->data_phase) { - return err; + case TASKFILE_OUT_DMAQ: + case TASKFILE_OUT_DMA: + case TASKFILE_IN_DMAQ: + case TASKFILE_IN_DMA: + if (!drive->using_dma) + break; + + if (!hwif->dma_setup(drive)) { + hwif->dma_exec_cmd(drive, taskfile->command); + hwif->dma_start(drive); + return ide_started; + } + break; + + default: + if (task->handler == NULL) + return ide_stopped; + + /* Issue the command */ + if (task->prehandler) { + hwif->OUTBSYNC(drive, taskfile->command, IDE_COMMAND_REG); + ndelay(400); /* FIXME */ + return task->prehandler(drive, task->rq); + } + ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL); + return ide_started; + } + + return ide_stopped; } diff --git a/trunk/drivers/ide/ide.c b/trunk/drivers/ide/ide.c index c6d4f630e18a..54943da6e4e5 100644 --- a/trunk/drivers/ide/ide.c +++ b/trunk/drivers/ide/ide.c @@ -424,6 +424,7 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif) hwif->reset_poll = tmp_hwif->reset_poll; hwif->pre_reset = tmp_hwif->pre_reset; hwif->resetproc = tmp_hwif->resetproc; + hwif->intrproc = tmp_hwif->intrproc; hwif->maskproc = tmp_hwif->maskproc; hwif->quirkproc = tmp_hwif->quirkproc; hwif->busproc = tmp_hwif->busproc; @@ -467,6 +468,7 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif) #endif hwif->dma_base = tmp_hwif->dma_base; + hwif->dma_master = tmp_hwif->dma_master; hwif->dma_command = tmp_hwif->dma_command; hwif->dma_vendor1 = tmp_hwif->dma_vendor1; hwif->dma_status = tmp_hwif->dma_status; @@ -600,6 +602,7 @@ void ide_unregister(unsigned int index) (void) ide_release_dma(hwif); hwif->dma_base = 0; + hwif->dma_master = 0; hwif->dma_command = 0; hwif->dma_vendor1 = 0; hwif->dma_status = 0; @@ -851,7 +854,8 @@ int set_using_dma(ide_drive_t *drive, int arg) err = 0; if (arg) { - if (ide_set_dma(drive)) + hwif->dma_off_quietly(drive); + if (ide_set_dma(drive) || hwif->ide_dma_on(drive)) err = -EIO; } else ide_dma_off(drive); diff --git a/trunk/drivers/ide/mips/au1xxx-ide.c b/trunk/drivers/ide/mips/au1xxx-ide.c index a4d0d4ca73d0..a4ce3ba15d61 100644 --- a/trunk/drivers/ide/mips/au1xxx-ide.c +++ b/trunk/drivers/ide/mips/au1xxx-ide.c @@ -198,6 +198,8 @@ static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed) break; #endif + default: + return; } au_writel(mem_sttime,MEM_STTIME2); diff --git a/trunk/drivers/ide/pci/aec62xx.c b/trunk/drivers/ide/pci/aec62xx.c index 7f4d1857d555..44268504ae43 100644 --- a/trunk/drivers/ide/pci/aec62xx.c +++ b/trunk/drivers/ide/pci/aec62xx.c @@ -202,7 +202,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = { .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_ATAPI_DMA | - IDE_HFLAG_ABUSE_SET_DMA_MODE | IDE_HFLAG_OFF_BOARD, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, @@ -212,7 +211,6 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = { .init_chipset = init_chipset_aec62xx, .init_hwif = init_hwif_aec62xx, .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA | - IDE_HFLAG_ABUSE_SET_DMA_MODE | IDE_HFLAG_OFF_BOARD, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, @@ -222,8 +220,7 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = { .init_chipset = init_chipset_aec62xx, .init_hwif = init_hwif_aec62xx, .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, - .host_flags = 
IDE_HFLAG_NO_ATAPI_DMA | - IDE_HFLAG_ABUSE_SET_DMA_MODE, + .host_flags = IDE_HFLAG_NO_ATAPI_DMA, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, @@ -231,9 +228,7 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = { .name = "AEC6280", .init_chipset = init_chipset_aec62xx, .init_hwif = init_hwif_aec62xx, - .host_flags = IDE_HFLAG_NO_ATAPI_DMA | - IDE_HFLAG_ABUSE_SET_DMA_MODE | - IDE_HFLAG_OFF_BOARD, + .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, @@ -242,9 +237,7 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = { .init_chipset = init_chipset_aec62xx, .init_hwif = init_hwif_aec62xx, .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, - .host_flags = IDE_HFLAG_NO_ATAPI_DMA | - IDE_HFLAG_ABUSE_SET_DMA_MODE | - IDE_HFLAG_OFF_BOARD, + .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, diff --git a/trunk/drivers/ide/pci/alim15x3.c b/trunk/drivers/ide/pci/alim15x3.c index 49aa82e412b6..ce293936af4b 100644 --- a/trunk/drivers/ide/pci/alim15x3.c +++ b/trunk/drivers/ide/pci/alim15x3.c @@ -402,6 +402,9 @@ static void ali_set_dma_mode(ide_drive_t *drive, const u8 speed) u8 tmpbyte = 0x00; int m5229_udma = (hwif->channel) ? 0x57 : 0x56; + if (speed < XFER_PIO_0) + return; + if (speed == XFER_UDMA_6) speed1 = 0x47; diff --git a/trunk/drivers/ide/pci/amd74xx.c b/trunk/drivers/ide/pci/amd74xx.c index cee51fdafcf6..8d4125ec252c 100644 --- a/trunk/drivers/ide/pci/amd74xx.c +++ b/trunk/drivers/ide/pci/amd74xx.c @@ -266,7 +266,6 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif) #define IDE_HFLAGS_AMD \ (IDE_HFLAG_PIO_NO_BLACKLIST | \ IDE_HFLAG_PIO_NO_DOWNGRADE | \ - IDE_HFLAG_ABUSE_SET_DMA_MODE | \ IDE_HFLAG_POST_SET_MODE | \ IDE_HFLAG_IO_32BIT | \ IDE_HFLAG_UNMASK_IRQS | \ diff --git a/trunk/drivers/ide/pci/atiixp.c b/trunk/drivers/ide/pci/atiixp.c index 5ae26564fb72..ef8e0164ef7a 100644 --- a/trunk/drivers/ide/pci/atiixp.c +++ b/trunk/drivers/ide/pci/atiixp.c @@ -133,6 +133,9 @@ static void atiixp_set_dma_mode(ide_drive_t *drive, const u8 speed) u32 tmp32; u16 tmp16; + if (speed < XFER_MW_DMA_0) + return; + spin_lock_irqsave(&atiixp_lock, flags); save_mdma_mode[drive->dn] = 0; diff --git a/trunk/drivers/ide/pci/cmd64x.c b/trunk/drivers/ide/pci/cmd64x.c index 0b1e9479f019..bc553337b1be 100644 --- a/trunk/drivers/ide/pci/cmd64x.c +++ b/trunk/drivers/ide/pci/cmd64x.c @@ -322,6 +322,8 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed) case XFER_MW_DMA_0: program_cycle_times(drive, 480, 215); break; + default: + return; } if (speed >= XFER_SW_DMA_0) @@ -331,15 +333,14 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed) static int cmd648_ide_dma_end (ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); - unsigned long base = hwif->dma_base - (hwif->channel * 8); int err = __ide_dma_end(drive); u8 irq_mask = hwif->channel ? MRDMODE_INTR_CH1 : MRDMODE_INTR_CH0; - u8 mrdmode = inb(base + 1); + u8 mrdmode = inb(hwif->dma_master + 0x01); /* clear the interrupt bit */ outb((mrdmode & ~(MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1)) | irq_mask, - base + 1); + hwif->dma_master + 0x01); return err; } @@ -364,11 +365,10 @@ static int cmd64x_ide_dma_end (ide_drive_t *drive) static int cmd648_ide_dma_test_irq (ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); - unsigned long base = hwif->dma_base - (hwif->channel * 8); u8 irq_mask = hwif->channel ? 
MRDMODE_INTR_CH1 : MRDMODE_INTR_CH0; u8 dma_stat = inb(hwif->dma_status); - u8 mrdmode = inb(base + 1); + u8 mrdmode = inb(hwif->dma_master + 0x01); #ifdef DEBUG printk("%s: dma_stat: 0x%02x mrdmode: 0x%02x irq_mask: 0x%02x\n", diff --git a/trunk/drivers/ide/pci/cs5520.c b/trunk/drivers/ide/pci/cs5520.c index d1a91bcb5b29..0466462fd21b 100644 --- a/trunk/drivers/ide/pci/cs5520.c +++ b/trunk/drivers/ide/pci/cs5520.c @@ -137,7 +137,6 @@ static void __devinit init_hwif_cs5520(ide_hwif_t *hwif) IDE_HFLAG_CS5520 | \ IDE_HFLAG_VDMA | \ IDE_HFLAG_NO_ATAPI_DMA | \ - IDE_HFLAG_ABUSE_SET_DMA_MODE |\ IDE_HFLAG_BOOTABLE, \ .pio_mask = ATA_PIO4, \ } diff --git a/trunk/drivers/ide/pci/cs5530.c b/trunk/drivers/ide/pci/cs5530.c index df5966b33460..547690395eee 100644 --- a/trunk/drivers/ide/pci/cs5530.c +++ b/trunk/drivers/ide/pci/cs5530.c @@ -116,6 +116,8 @@ static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode) case XFER_MW_DMA_0: timings = 0x00077771; break; case XFER_MW_DMA_1: timings = 0x00012121; break; case XFER_MW_DMA_2: timings = 0x00002020; break; + default: + return; } basereg = CS5530_BASEREG(drive->hwif); reg = inl(basereg + 4); /* get drive0 config register */ diff --git a/trunk/drivers/ide/pci/cs5535.c b/trunk/drivers/ide/pci/cs5535.c index 50b3d7791f55..ddcbeba671e1 100644 --- a/trunk/drivers/ide/pci/cs5535.c +++ b/trunk/drivers/ide/pci/cs5535.c @@ -190,7 +190,7 @@ static const struct ide_port_info cs5535_chipset __devinitdata = { .name = "CS5535", .init_hwif = init_hwif_cs5535, .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE | - IDE_HFLAG_ABUSE_SET_DMA_MODE | IDE_HFLAG_BOOTABLE, + IDE_HFLAG_BOOTABLE, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, diff --git a/trunk/drivers/ide/pci/hpt34x.c b/trunk/drivers/ide/pci/hpt34x.c index dfba0d13fcd3..ae6307fae4f9 100644 --- a/trunk/drivers/ide/pci/hpt34x.c +++ b/trunk/drivers/ide/pci/hpt34x.c @@ -129,18 +129,14 @@ static void __devinit init_hwif_hpt34x(ide_hwif_t *hwif) hwif->set_dma_mode = &hpt34x_set_mode; } -#define IDE_HFLAGS_HPT34X \ - (IDE_HFLAG_NO_ATAPI_DMA | \ - IDE_HFLAG_ABUSE_SET_DMA_MODE | \ - IDE_HFLAG_NO_AUTODMA) - static const struct ide_port_info hpt34x_chipsets[] __devinitdata = { { /* 0 */ .name = "HPT343", .init_chipset = init_chipset_hpt34x, .init_hwif = init_hwif_hpt34x, .extra = 16, - .host_flags = IDE_HFLAGS_HPT34X, + .host_flags = IDE_HFLAG_NO_ATAPI_DMA | + IDE_HFLAG_NO_AUTODMA, .pio_mask = ATA_PIO5, }, { /* 1 */ @@ -148,7 +144,9 @@ static const struct ide_port_info hpt34x_chipsets[] __devinitdata = { .init_chipset = init_chipset_hpt34x, .init_hwif = init_hwif_hpt34x, .extra = 16, - .host_flags = IDE_HFLAGS_HPT34X | IDE_HFLAG_OFF_BOARD, + .host_flags = IDE_HFLAG_NO_ATAPI_DMA | + IDE_HFLAG_NO_AUTODMA | + IDE_HFLAG_OFF_BOARD, .pio_mask = ATA_PIO5, #ifdef CONFIG_HPT34X_AUTODMA .swdma_mask = ATA_SWDMA2, diff --git a/trunk/drivers/ide/pci/hpt366.c b/trunk/drivers/ide/pci/hpt366.c index 3777fb8c8043..9fce25bdec8a 100644 --- a/trunk/drivers/ide/pci/hpt366.c +++ b/trunk/drivers/ide/pci/hpt366.c @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/pci/hpt366.c Version 1.30 Dec 12, 2007 + * linux/drivers/ide/pci/hpt366.c Version 1.22 Dec 4, 2007 * * Copyright (C) 1999-2003 Andre Hedrick * Portions Copyright (C) 2001 Sun Microsystems, Inc. 
@@ -88,7 +88,7 @@ * - rename all the register related variables consistently * - move all the interrupt twiddling code from the speedproc handlers into * init_hwif_hpt366(), also grouping all the DMA related code together there - * - merge HPT36x/HPT37x speedproc handlers, fix PIO timing register mask and + * - merge two HPT37x speedproc handlers, fix the PIO timing register mask and * separate the UltraDMA and MWDMA masks there to avoid changing PIO timings * when setting an UltraDMA mode * - fix hpt3xx_tune_drive() to set the PIO mode requested, not always select @@ -458,13 +458,6 @@ enum ata_clock { NUM_ATA_CLOCKS }; -struct hpt_timings { - u32 pio_mask; - u32 dma_mask; - u32 ultra_mask; - u32 *clock_table[NUM_ATA_CLOCKS]; -}; - /* * Hold all the HighPoint chip information in one place. */ @@ -475,8 +468,7 @@ struct hpt_info { u8 udma_mask; /* Allowed UltraDMA modes mask. */ u8 dpll_clk; /* DPLL clock in MHz */ u8 pci_clk; /* PCI clock in MHz */ - struct hpt_timings *timings; /* Chipset timing data */ - u8 clock; /* ATA clock selected */ + u32 **settings; /* Chipset settings table */ }; /* Supported HighPoint chips */ @@ -494,30 +486,20 @@ enum { HPT371N }; -static struct hpt_timings hpt36x_timings = { - .pio_mask = 0xc1f8ffff, - .dma_mask = 0x303800ff, - .ultra_mask = 0x30070000, - .clock_table = { - [ATA_CLOCK_25MHZ] = twenty_five_base_hpt36x, - [ATA_CLOCK_33MHZ] = thirty_three_base_hpt36x, - [ATA_CLOCK_40MHZ] = forty_base_hpt36x, - [ATA_CLOCK_50MHZ] = NULL, - [ATA_CLOCK_66MHZ] = NULL - } +static u32 *hpt36x_settings[NUM_ATA_CLOCKS] = { + twenty_five_base_hpt36x, + thirty_three_base_hpt36x, + forty_base_hpt36x, + NULL, + NULL }; -static struct hpt_timings hpt37x_timings = { - .pio_mask = 0xcfc3ffff, - .dma_mask = 0x31c001ff, - .ultra_mask = 0x303c0000, - .clock_table = { - [ATA_CLOCK_25MHZ] = NULL, - [ATA_CLOCK_33MHZ] = thirty_three_base_hpt37x, - [ATA_CLOCK_40MHZ] = NULL, - [ATA_CLOCK_50MHZ] = fifty_base_hpt37x, - [ATA_CLOCK_66MHZ] = sixty_six_base_hpt37x - } +static u32 *hpt37x_settings[NUM_ATA_CLOCKS] = { + NULL, + thirty_three_base_hpt37x, + NULL, + fifty_base_hpt37x, + sixty_six_base_hpt37x }; static const struct hpt_info hpt36x __devinitdata = { @@ -525,7 +507,7 @@ static const struct hpt_info hpt36x __devinitdata = { .chip_type = HPT36x, .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2, .dpll_clk = 0, /* no DPLL */ - .timings = &hpt36x_timings + .settings = hpt36x_settings }; static const struct hpt_info hpt370 __devinitdata = { @@ -533,7 +515,7 @@ static const struct hpt_info hpt370 __devinitdata = { .chip_type = HPT370, .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4, .dpll_clk = 48, - .timings = &hpt37x_timings + .settings = hpt37x_settings }; static const struct hpt_info hpt370a __devinitdata = { @@ -541,7 +523,7 @@ static const struct hpt_info hpt370a __devinitdata = { .chip_type = HPT370A, .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4, .dpll_clk = 48, - .timings = &hpt37x_timings + .settings = hpt37x_settings }; static const struct hpt_info hpt374 __devinitdata = { @@ -549,7 +531,7 @@ static const struct hpt_info hpt374 __devinitdata = { .chip_type = HPT374, .udma_mask = ATA_UDMA5, .dpll_clk = 48, - .timings = &hpt37x_timings + .settings = hpt37x_settings }; static const struct hpt_info hpt372 __devinitdata = { @@ -557,7 +539,7 @@ static const struct hpt_info hpt372 __devinitdata = { .chip_type = HPT372, .udma_mask = HPT372_ALLOW_ATA133_6 ? 
ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 55, - .timings = &hpt37x_timings + .settings = hpt37x_settings }; static const struct hpt_info hpt372a __devinitdata = { @@ -565,7 +547,7 @@ static const struct hpt_info hpt372a __devinitdata = { .chip_type = HPT372A, .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 66, - .timings = &hpt37x_timings + .settings = hpt37x_settings }; static const struct hpt_info hpt302 __devinitdata = { @@ -573,7 +555,7 @@ static const struct hpt_info hpt302 __devinitdata = { .chip_type = HPT302, .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 66, - .timings = &hpt37x_timings + .settings = hpt37x_settings }; static const struct hpt_info hpt371 __devinitdata = { @@ -581,7 +563,7 @@ static const struct hpt_info hpt371 __devinitdata = { .chip_type = HPT371, .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 66, - .timings = &hpt37x_timings + .settings = hpt37x_settings }; static const struct hpt_info hpt372n __devinitdata = { @@ -589,7 +571,7 @@ static const struct hpt_info hpt372n __devinitdata = { .chip_type = HPT372N, .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 77, - .timings = &hpt37x_timings + .settings = hpt37x_settings }; static const struct hpt_info hpt302n __devinitdata = { @@ -597,7 +579,7 @@ static const struct hpt_info hpt302n __devinitdata = { .chip_type = HPT302N, .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 77, - .timings = &hpt37x_timings + .settings = hpt37x_settings }; static const struct hpt_info hpt371n __devinitdata = { @@ -605,7 +587,7 @@ static const struct hpt_info hpt371n __devinitdata = { .chip_type = HPT371N, .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5, .dpll_clk = 77, - .timings = &hpt37x_timings + .settings = hpt37x_settings }; static int check_in_drive_list(ide_drive_t *drive, const char **list) @@ -693,33 +675,71 @@ static u32 get_speed_setting(u8 speed, struct hpt_info *info) for (i = 0; i < ARRAY_SIZE(xfer_speeds) - 1; i++) if (xfer_speeds[i] == speed) break; - - return info->timings->clock_table[info->clock][i]; + /* + * NOTE: info->settings only points to the pointer + * to the list of the actual register values + */ + return (*info->settings)[i]; } -static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed) +static void hpt36x_set_mode(ide_drive_t *drive, const u8 speed) { - struct pci_dev *dev = HWIF(drive)->pci_dev; + ide_hwif_t *hwif = HWIF(drive); + struct pci_dev *dev = hwif->pci_dev; struct hpt_info *info = pci_get_drvdata(dev); - struct hpt_timings *t = info->timings; - u8 itr_addr = 0x40 + (drive->dn * 4); + u8 itr_addr = drive->dn ? 0x44 : 0x40; u32 old_itr = 0; - u32 new_itr = get_speed_setting(speed, info); - u32 itr_mask = speed < XFER_MW_DMA_0 ? t->pio_mask : - (speed < XFER_UDMA_0 ? t->dma_mask : - t->ultra_mask); + u32 itr_mask, new_itr; + + itr_mask = speed < XFER_MW_DMA_0 ? 0x30070000 : + (speed < XFER_UDMA_0 ? 
0xc0070000 : 0xc03800ff); + + new_itr = get_speed_setting(speed, info); - pci_read_config_dword(dev, itr_addr, &old_itr); - new_itr = (old_itr & ~itr_mask) | (new_itr & itr_mask); /* * Disable on-chip PIO FIFO/buffer (and PIO MST mode as well) * to avoid problems handling I/O errors later */ + pci_read_config_dword(dev, itr_addr, &old_itr); + new_itr = (new_itr & ~itr_mask) | (old_itr & itr_mask); new_itr &= ~0xc0000000; pci_write_config_dword(dev, itr_addr, new_itr); } +static void hpt37x_set_mode(ide_drive_t *drive, const u8 speed) +{ + ide_hwif_t *hwif = HWIF(drive); + struct pci_dev *dev = hwif->pci_dev; + struct hpt_info *info = pci_get_drvdata(dev); + u8 itr_addr = 0x40 + (drive->dn * 4); + u32 old_itr = 0; + u32 itr_mask, new_itr; + + itr_mask = speed < XFER_MW_DMA_0 ? 0x303c0000 : + (speed < XFER_UDMA_0 ? 0xc03c0000 : 0xc1c001ff); + + new_itr = get_speed_setting(speed, info); + + pci_read_config_dword(dev, itr_addr, &old_itr); + new_itr = (new_itr & ~itr_mask) | (old_itr & itr_mask); + + if (speed < XFER_MW_DMA_0) + new_itr &= ~0x80000000; /* Disable on-chip PIO FIFO/buffer */ + pci_write_config_dword(dev, itr_addr, new_itr); +} + +static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed) +{ + ide_hwif_t *hwif = HWIF(drive); + struct hpt_info *info = pci_get_drvdata(hwif->pci_dev); + + if (info->chip_type >= HPT370) + hpt37x_set_mode(drive, speed); + else /* hpt368: hpt_minimum_revision(dev, 2) */ + hpt36x_set_mode(drive, speed); +} + static void hpt3xx_set_pio_mode(ide_drive_t *drive, const u8 pio) { hpt3xx_set_mode(drive, XFER_PIO_0 + pio); @@ -736,6 +756,15 @@ static int hpt3xx_quirkproc(ide_drive_t *drive) return 0; } +static void hpt3xx_intrproc(ide_drive_t *drive) +{ + if (drive->quirk_list) + return; + + /* drives in the quirk_list may not like intr setups/cleanups */ + outb(drive->ctl | 2, IDE_CONTROL_REG); +} + static void hpt3xx_maskproc(ide_drive_t *drive, int mask) { ide_hwif_t *hwif = HWIF(drive); @@ -885,33 +914,32 @@ static int hpt374_ide_dma_end(ide_drive_t *drive) static void hpt3xxn_set_clock(ide_hwif_t *hwif, u8 mode) { - unsigned long base = hwif->extra_base; - u8 scr2 = inb(base + 0x6b); + u8 scr2 = inb(hwif->dma_master + 0x7b); if ((scr2 & 0x7f) == mode) return; /* Tristate the bus */ - outb(0x80, base + 0x63); - outb(0x80, base + 0x67); + outb(0x80, hwif->dma_master + 0x73); + outb(0x80, hwif->dma_master + 0x77); /* Switch clock and reset channels */ - outb(mode, base + 0x6b); - outb(0xc0, base + 0x69); + outb(mode, hwif->dma_master + 0x7b); + outb(0xc0, hwif->dma_master + 0x79); /* * Reset the state machines. * NOTE: avoid accidentally enabling the disabled channels. */ - outb(inb(base + 0x60) | 0x32, base + 0x60); - outb(inb(base + 0x64) | 0x32, base + 0x64); + outb(inb(hwif->dma_master + 0x70) | 0x32, hwif->dma_master + 0x70); + outb(inb(hwif->dma_master + 0x74) | 0x32, hwif->dma_master + 0x74); /* Complete reset */ - outb(0x00, base + 0x69); + outb(0x00, hwif->dma_master + 0x79); /* Reconnect channels to bus */ - outb(0x00, base + 0x63); - outb(0x00, base + 0x67); + outb(0x00, hwif->dma_master + 0x73); + outb(0x00, hwif->dma_master + 0x77); } /** @@ -1182,7 +1210,7 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha * We also don't like using the DPLL because this causes glitches * on PRST-/SRST- when the state engine gets reset... */ - if (chip_type >= HPT374 || info->timings->clock_table[clock] == NULL) { + if (chip_type >= HPT374 || info->settings[clock] == NULL) { u16 f_low, delta = pci_clk < 50 ? 
2 : 4; int adjust; @@ -1198,7 +1226,7 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha clock = ATA_CLOCK_50MHZ; } - if (info->timings->clock_table[clock] == NULL) { + if (info->settings[clock] == NULL) { printk(KERN_ERR "%s: unknown bus timing!\n", name); kfree(info); return -EIO; @@ -1239,10 +1267,15 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha printk("%s: using %d MHz PCI clock\n", name, pci_clk); } + /* + * Advance the table pointer to a slot which points to the list + * of the register values settings matching the clock being used. + */ + info->settings += clock; + /* Store the clock frequencies. */ info->dpll_clk = dpll_clk; info->pci_clk = pci_clk; - info->clock = clock; /* Point to this chip's own instance of the hpt_info structure. */ pci_set_drvdata(dev, info); @@ -1287,8 +1320,8 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif) hwif->set_pio_mode = &hpt3xx_set_pio_mode; hwif->set_dma_mode = &hpt3xx_set_mode; - hwif->quirkproc = &hpt3xx_quirkproc; + hwif->intrproc = &hpt3xx_intrproc; hwif->maskproc = &hpt3xx_maskproc; hwif->busproc = &hpt3xx_busproc; @@ -1461,11 +1494,6 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2) return 0; } -#define IDE_HFLAGS_HPT3XX \ - (IDE_HFLAG_NO_ATAPI_DMA | \ - IDE_HFLAG_ABUSE_SET_DMA_MODE | \ - IDE_HFLAG_OFF_BOARD) - static const struct ide_port_info hpt366_chipsets[] __devinitdata = { { /* 0 */ .name = "HPT36x", @@ -1480,7 +1508,9 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = { */ .enablebits = {{0x50,0x10,0x10}, {0x54,0x04,0x04}}, .extra = 240, - .host_flags = IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE, + .host_flags = IDE_HFLAG_SINGLE | + IDE_HFLAG_NO_ATAPI_DMA | + IDE_HFLAG_OFF_BOARD, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, },{ /* 1 */ @@ -1490,7 +1520,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = { .init_dma = init_dma_hpt366, .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, .extra = 240, - .host_flags = IDE_HFLAGS_HPT3XX, + .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, },{ /* 2 */ @@ -1500,7 +1530,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = { .init_dma = init_dma_hpt366, .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, .extra = 240, - .host_flags = IDE_HFLAGS_HPT3XX, + .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, },{ /* 3 */ @@ -1510,7 +1540,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = { .init_dma = init_dma_hpt366, .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, .extra = 240, - .host_flags = IDE_HFLAGS_HPT3XX, + .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, },{ /* 4 */ @@ -1521,7 +1551,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = { .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, .udma_mask = ATA_UDMA5, .extra = 240, - .host_flags = IDE_HFLAGS_HPT3XX, + .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, },{ /* 5 */ @@ -1531,7 +1561,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = { .init_dma = init_dma_hpt366, .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, .extra = 240, - .host_flags = IDE_HFLAGS_HPT3XX, + .host_flags = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_OFF_BOARD, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, } 
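(Side note on the hpt366.c hunks above: they restore the older per-clock timing tables, where a "u32 **settings" pointer is advanced once to the slot matching the selected ATA clock and then double-dereferenced per transfer mode in get_speed_setting(). The following is a standalone, illustrative sketch of that lookup pattern only -- the table values, transfer-mode ids, and names such as example_settings, chip_info, t33/t66 are placeholders invented for the example, not real HighPoint timings.)

    /* sketch of the pointer-advancing per-clock timing lookup */
    #include <stdio.h>

    enum ata_clock { ATA_CLOCK_25MHZ, ATA_CLOCK_33MHZ, ATA_CLOCK_40MHZ,
                     ATA_CLOCK_50MHZ, ATA_CLOCK_66MHZ, NUM_ATA_CLOCKS };

    /* one register-value table per supported clock; NULL = unsupported */
    static unsigned int t33[] = { 0x11111111, 0x22222222, 0x33333333 };
    static unsigned int t66[] = { 0x44444444, 0x55555555, 0x66666666 };

    static unsigned int *example_settings[NUM_ATA_CLOCKS] = {
            NULL, t33, NULL, NULL, t66
    };

    struct chip_info {
            unsigned int **settings;  /* points INTO example_settings[] */
    };

    /* placeholder transfer-mode ids standing in for XFER_PIO_0 etc. */
    static const unsigned char xfer_speeds[] = { 10, 11, 12 };

    static unsigned int get_speed_setting(unsigned char speed,
                                          struct chip_info *info)
    {
            int i, n = (int)(sizeof(xfer_speeds) / sizeof(xfer_speeds[0]));

            for (i = 0; i < n - 1; i++)
                    if (xfer_speeds[i] == speed)
                            break;
            /* settings points at the pointer to the selected clock's table */
            return (*info->settings)[i];
    }

    int main(void)
    {
            struct chip_info info = { .settings = example_settings };
            enum ata_clock clock = ATA_CLOCK_66MHZ;

            if (info.settings[clock] == NULL)
                    return 1;           /* unknown bus timing */
            info.settings += clock;     /* select the table for this clock */

            printf("0x%08x\n", get_speed_setting(11, &info));  /* 0x55555555 */
            return 0;
    }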
diff --git a/trunk/drivers/ide/pci/it8213.c b/trunk/drivers/ide/pci/it8213.c index 2a0f45c4f4c4..90b52ed37bfc 100644 --- a/trunk/drivers/ide/pci/it8213.c +++ b/trunk/drivers/ide/pci/it8213.c @@ -101,11 +101,24 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed) pci_read_config_byte(dev, 0x54, ®54); pci_read_config_byte(dev, 0x55, ®55); - if (speed >= XFER_UDMA_0) { - u8 udma = speed - XFER_UDMA_0; - - u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4); + switch(speed) { + case XFER_UDMA_6: + case XFER_UDMA_4: + case XFER_UDMA_2: u_speed = 2 << (drive->dn * 4); break; + case XFER_UDMA_5: + case XFER_UDMA_3: + case XFER_UDMA_1: u_speed = 1 << (drive->dn * 4); break; + case XFER_UDMA_0: u_speed = 0 << (drive->dn * 4); break; + break; + case XFER_MW_DMA_2: + case XFER_MW_DMA_1: + case XFER_SW_DMA_2: + break; + default: + return; + } + if (speed >= XFER_UDMA_0) { if (!(reg48 & u_flag)) pci_write_config_byte(dev, 0x48, reg48 | u_flag); if (speed >= XFER_UDMA_5) { diff --git a/trunk/drivers/ide/pci/pdc202xx_new.c b/trunk/drivers/ide/pci/pdc202xx_new.c index ef4a99b99d1f..2b4f44e45a1a 100644 --- a/trunk/drivers/ide/pci/pdc202xx_new.c +++ b/trunk/drivers/ide/pci/pdc202xx_new.c @@ -146,7 +146,7 @@ static struct udma_timing { { 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */ }; -static void pdcnew_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void pdcnew_set_mode(ide_drive_t *drive, const u8 speed) { ide_hwif_t *hwif = HWIF(drive); u8 adj = (drive->dn & 1) ? 0x08 : 0x00; @@ -162,18 +162,45 @@ static void pdcnew_set_dma_mode(ide_drive_t *drive, const u8 speed) if (max_dma_rate(hwif->pci_dev) == 4) { u8 mode = speed & 0x07; - if (speed >= XFER_UDMA_0) { - set_indexed_reg(hwif, 0x10 + adj, - udma_timings[mode].reg10); - set_indexed_reg(hwif, 0x11 + adj, - udma_timings[mode].reg11); - set_indexed_reg(hwif, 0x12 + adj, - udma_timings[mode].reg12); - } else { - set_indexed_reg(hwif, 0x0e + adj, - mwdma_timings[mode].reg0e); - set_indexed_reg(hwif, 0x0f + adj, - mwdma_timings[mode].reg0f); + switch (speed) { + case XFER_UDMA_6: + case XFER_UDMA_5: + case XFER_UDMA_4: + case XFER_UDMA_3: + case XFER_UDMA_2: + case XFER_UDMA_1: + case XFER_UDMA_0: + set_indexed_reg(hwif, 0x10 + adj, + udma_timings[mode].reg10); + set_indexed_reg(hwif, 0x11 + adj, + udma_timings[mode].reg11); + set_indexed_reg(hwif, 0x12 + adj, + udma_timings[mode].reg12); + break; + + case XFER_MW_DMA_2: + case XFER_MW_DMA_1: + case XFER_MW_DMA_0: + set_indexed_reg(hwif, 0x0e + adj, + mwdma_timings[mode].reg0e); + set_indexed_reg(hwif, 0x0f + adj, + mwdma_timings[mode].reg0f); + break; + case XFER_PIO_4: + case XFER_PIO_3: + case XFER_PIO_2: + case XFER_PIO_1: + case XFER_PIO_0: + set_indexed_reg(hwif, 0x0c + adj, + pio_timings[mode].reg0c); + set_indexed_reg(hwif, 0x0d + adj, + pio_timings[mode].reg0d); + set_indexed_reg(hwif, 0x13 + adj, + pio_timings[mode].reg13); + break; + default: + printk(KERN_ERR "pdc202xx_new: " + "Unknown speed %d ignored\n", speed); } } else if (speed == XFER_UDMA_2) { /* Set tHOLD bit to 0 if using UDMA mode 2 */ @@ -185,14 +212,7 @@ static void pdcnew_set_dma_mode(ide_drive_t *drive, const u8 speed) static void pdcnew_set_pio_mode(ide_drive_t *drive, const u8 pio) { - ide_hwif_t *hwif = drive->hwif; - u8 adj = (drive->dn & 1) ? 
0x08 : 0x00; - - if (max_dma_rate(hwif->pci_dev) == 4) { - set_indexed_reg(hwif, 0x0c + adj, pio_timings[pio].reg0c); - set_indexed_reg(hwif, 0x0d + adj, pio_timings[pio].reg0d); - set_indexed_reg(hwif, 0x13 + adj, pio_timings[pio].reg13); - } + pdcnew_set_mode(drive, XFER_PIO_0 + pio); } static u8 pdcnew_cable_detect(ide_hwif_t *hwif) @@ -446,7 +466,7 @@ static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const cha static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif) { hwif->set_pio_mode = &pdcnew_set_pio_mode; - hwif->set_dma_mode = &pdcnew_set_dma_mode; + hwif->set_dma_mode = &pdcnew_set_mode; hwif->quirkproc = &pdcnew_quirkproc; hwif->resetproc = &pdcnew_reset; diff --git a/trunk/drivers/ide/pci/pdc202xx_old.c b/trunk/drivers/ide/pci/pdc202xx_old.c index 67b2781e2213..e09742e2ba59 100644 --- a/trunk/drivers/ide/pci/pdc202xx_old.c +++ b/trunk/drivers/ide/pci/pdc202xx_old.c @@ -162,7 +162,7 @@ static u8 pdc202xx_old_cable_detect (ide_hwif_t *hwif) */ static void pdc_old_enable_66MHz_clock(ide_hwif_t *hwif) { - unsigned long clock_reg = hwif->extra_base + 0x01; + unsigned long clock_reg = hwif->dma_master + 0x11; u8 clock = inb(clock_reg); outb(clock | (hwif->channel ? 0x08 : 0x02), clock_reg); @@ -170,7 +170,7 @@ static void pdc_old_enable_66MHz_clock(ide_hwif_t *hwif) static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif) { - unsigned long clock_reg = hwif->extra_base + 0x01; + unsigned long clock_reg = hwif->dma_master + 0x11; u8 clock = inb(clock_reg); outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg); @@ -193,7 +193,7 @@ static void pdc202xx_old_ide_dma_start(ide_drive_t *drive) if (drive->media != ide_disk || drive->addressing == 1) { struct request *rq = HWGROUP(drive)->rq; ide_hwif_t *hwif = HWIF(drive); - unsigned long high_16 = hwif->extra_base - 16; + unsigned long high_16 = hwif->dma_master; unsigned long atapi_reg = high_16 + (hwif->channel ? 0x24 : 0x20); u32 word_count = 0; u8 clock = inb(high_16 + 0x11); @@ -212,7 +212,7 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive) { if (drive->media != ide_disk || drive->addressing == 1) { ide_hwif_t *hwif = HWIF(drive); - unsigned long high_16 = hwif->extra_base - 16; + unsigned long high_16 = hwif->dma_master; unsigned long atapi_reg = high_16 + (hwif->channel ? 
0x24 : 0x20); u8 clock = 0; @@ -228,7 +228,7 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive) static int pdc202xx_old_ide_dma_test_irq(ide_drive_t *drive) { ide_hwif_t *hwif = HWIF(drive); - unsigned long high_16 = hwif->extra_base - 16; + unsigned long high_16 = hwif->dma_master; u8 dma_stat = inb(hwif->dma_status); u8 sc1d = inb(high_16 + 0x001d); @@ -271,7 +271,7 @@ static void pdc202xx_dma_timeout(ide_drive_t *drive) static void pdc202xx_reset_host (ide_hwif_t *hwif) { - unsigned long high_16 = hwif->extra_base - 16; + unsigned long high_16 = hwif->dma_master; u8 udma_speed_flag = inb(high_16 | 0x001f); outb(udma_speed_flag | 0x10, high_16 | 0x001f); @@ -375,11 +375,6 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev, } } -#define IDE_HFLAGS_PDC202XX \ - (IDE_HFLAG_ERROR_STOPS_FIFO | \ - IDE_HFLAG_ABUSE_SET_DMA_MODE | \ - IDE_HFLAG_OFF_BOARD) - #define DECLARE_PDC2026X_DEV(name_str, udma, extra_flags) \ { \ .name = name_str, \ @@ -387,7 +382,9 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev, .init_hwif = init_hwif_pdc202xx, \ .init_dma = init_dma_pdc202xx, \ .extra = 48, \ - .host_flags = IDE_HFLAGS_PDC202XX | extra_flags, \ + .host_flags = IDE_HFLAG_ERROR_STOPS_FIFO | \ + extra_flags | \ + IDE_HFLAG_OFF_BOARD, \ .pio_mask = ATA_PIO4, \ .mwdma_mask = ATA_MWDMA2, \ .udma_mask = udma, \ @@ -400,7 +397,8 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = { .init_hwif = init_hwif_pdc202xx, .init_dma = init_dma_pdc202xx, .extra = 16, - .host_flags = IDE_HFLAGS_PDC202XX, + .host_flags = IDE_HFLAG_ERROR_STOPS_FIFO | + IDE_HFLAG_OFF_BOARD, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA2, diff --git a/trunk/drivers/ide/pci/piix.c b/trunk/drivers/ide/pci/piix.c index bd6d3f77d30c..27781d294cea 100644 --- a/trunk/drivers/ide/pci/piix.c +++ b/trunk/drivers/ide/pci/piix.c @@ -203,11 +203,20 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed) pci_read_config_byte(dev, 0x54, ®54); pci_read_config_byte(dev, 0x55, ®55); - if (speed >= XFER_UDMA_0) { - u8 udma = speed - XFER_UDMA_0; - - u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4); + switch(speed) { + case XFER_UDMA_4: + case XFER_UDMA_2: u_speed = 2 << (drive->dn * 4); break; + case XFER_UDMA_5: + case XFER_UDMA_3: + case XFER_UDMA_1: u_speed = 1 << (drive->dn * 4); break; + case XFER_UDMA_0: u_speed = 0 << (drive->dn * 4); break; + case XFER_MW_DMA_2: + case XFER_MW_DMA_1: + case XFER_SW_DMA_2: break; + default: return; + } + if (speed >= XFER_UDMA_0) { if (!(reg48 & u_flag)) pci_write_config_byte(dev, 0x48, reg48 | u_flag); if (speed == XFER_UDMA_5) { diff --git a/trunk/drivers/ide/pci/sc1200.c b/trunk/drivers/ide/pci/sc1200.c index fef20bd4aa78..707d5ff66b03 100644 --- a/trunk/drivers/ide/pci/sc1200.c +++ b/trunk/drivers/ide/pci/sc1200.c @@ -135,29 +135,59 @@ static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode) unsigned short pci_clock; unsigned int basereg = hwif->channel ? 0x50 : 0x40; - static const u32 udma_timing[3][3] = { - { 0x00921250, 0x00911140, 0x00911030 }, - { 0x00932470, 0x00922260, 0x00922140 }, - { 0x009436a1, 0x00933481, 0x00923261 }, - }; - - static const u32 mwdma_timing[3][3] = { - { 0x00077771, 0x00012121, 0x00002020 }, - { 0x000bbbb2, 0x00024241, 0x00013131 }, - { 0x000ffff3, 0x00035352, 0x00015151 }, - }; - pci_clock = sc1200_get_pci_clock(); /* * Note that each DMA mode has several timings associated with it. * The correct timing depends on the fast PCI clock freq. 
*/ - - if (mode >= XFER_UDMA_0) - timings = udma_timing[pci_clock][mode - XFER_UDMA_0]; - else - timings = mwdma_timing[pci_clock][mode - XFER_MW_DMA_0]; + timings = 0; + switch (mode) { + case XFER_UDMA_0: + switch (pci_clock) { + case PCI_CLK_33: timings = 0x00921250; break; + case PCI_CLK_48: timings = 0x00932470; break; + case PCI_CLK_66: timings = 0x009436a1; break; + } + break; + case XFER_UDMA_1: + switch (pci_clock) { + case PCI_CLK_33: timings = 0x00911140; break; + case PCI_CLK_48: timings = 0x00922260; break; + case PCI_CLK_66: timings = 0x00933481; break; + } + break; + case XFER_UDMA_2: + switch (pci_clock) { + case PCI_CLK_33: timings = 0x00911030; break; + case PCI_CLK_48: timings = 0x00922140; break; + case PCI_CLK_66: timings = 0x00923261; break; + } + break; + case XFER_MW_DMA_0: + switch (pci_clock) { + case PCI_CLK_33: timings = 0x00077771; break; + case PCI_CLK_48: timings = 0x000bbbb2; break; + case PCI_CLK_66: timings = 0x000ffff3; break; + } + break; + case XFER_MW_DMA_1: + switch (pci_clock) { + case PCI_CLK_33: timings = 0x00012121; break; + case PCI_CLK_48: timings = 0x00024241; break; + case PCI_CLK_66: timings = 0x00035352; break; + } + break; + case XFER_MW_DMA_2: + switch (pci_clock) { + case PCI_CLK_33: timings = 0x00002020; break; + case PCI_CLK_48: timings = 0x00013131; break; + case PCI_CLK_66: timings = 0x00015151; break; + } + break; + default: + return; + } if (unit == 0) { /* are we configuring drive0? */ pci_read_config_dword(hwif->pci_dev, basereg+4, ®); @@ -230,40 +260,67 @@ static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio) } #ifdef CONFIG_PM -struct sc1200_saved_state { - u32 regs[8]; -}; +static ide_hwif_t *lookup_pci_dev (ide_hwif_t *prev, struct pci_dev *dev) +{ + int h; + + for (h = 0; h < MAX_HWIFS; h++) { + ide_hwif_t *hwif = &ide_hwifs[h]; + if (prev) { + if (hwif == prev) + prev = NULL; // found previous, now look for next match + } else { + if (hwif && hwif->pci_dev == dev) + return hwif; // found next match + } + } + return NULL; // not found +} + +typedef struct sc1200_saved_state_s { + __u32 regs[4]; +} sc1200_saved_state_t; + static int sc1200_suspend (struct pci_dev *dev, pm_message_t state) { + ide_hwif_t *hwif = NULL; + printk("SC1200: suspend(%u)\n", state.event); - /* - * we only save state when going from full power to less - */ if (state.event == PM_EVENT_ON) { - struct sc1200_saved_state *ss; - unsigned int r; - - /* - * allocate a permanent save area, if not already allocated - */ - ss = (struct sc1200_saved_state *)pci_get_drvdata(dev); - if (ss == NULL) { - ss = kmalloc(sizeof(*ss), GFP_KERNEL); - if (ss == NULL) - return -ENOMEM; - pci_set_drvdata(dev, ss); + // we only save state when going from full power to less + + // + // Loop over all interfaces that are part of this PCI device: + // + while ((hwif = lookup_pci_dev(hwif, dev)) != NULL) { + sc1200_saved_state_t *ss; + unsigned int basereg, r; + // + // allocate a permanent save area, if not already allocated + // + ss = (sc1200_saved_state_t *)hwif->config_data; + if (ss == NULL) { + ss = kmalloc(sizeof(sc1200_saved_state_t), GFP_KERNEL); + if (ss == NULL) + return -ENOMEM; + hwif->config_data = (unsigned long)ss; + } + ss = (sc1200_saved_state_t *)hwif->config_data; + // + // Save timing registers: this may be unnecessary if + // BIOS also does it + // + basereg = hwif->channel ? 
0x50 : 0x40; + for (r = 0; r < 4; ++r) { + pci_read_config_dword (hwif->pci_dev, basereg + (r<<2), &ss->regs[r]); + } } - - /* - * save timing registers - * (this may be unnecessary if BIOS also does it) - */ - for (r = 0; r < 8; r++) - pci_read_config_dword(dev, 0x40 + r * 4, &ss->regs[r]); } + /* You don't need to iterate over disks -- sysfs should have done that for you already */ + pci_disable_device(dev); pci_set_power_state(dev, pci_choose_state(dev, state)); return 0; @@ -271,25 +328,30 @@ static int sc1200_suspend (struct pci_dev *dev, pm_message_t state) static int sc1200_resume (struct pci_dev *dev) { - struct sc1200_saved_state *ss; - unsigned int r; - int i; + ide_hwif_t *hwif = NULL; + int i; i = pci_enable_device(dev); if (i) return i; - ss = (struct sc1200_saved_state *)pci_get_drvdata(dev); - - /* - * restore timing registers - * (this may be unnecessary if BIOS also does it) - */ - if (ss) { - for (r = 0; r < 8; r++) - pci_write_config_dword(dev, 0x40 + r * 4, ss->regs[r]); + // + // loop over all interfaces that are part of this pci device: + // + while ((hwif = lookup_pci_dev(hwif, dev)) != NULL) { + unsigned int basereg, r; + sc1200_saved_state_t *ss = (sc1200_saved_state_t *)hwif->config_data; + + // + // Restore timing registers: this may be unnecessary if BIOS also does it + // + basereg = hwif->channel ? 0x50 : 0x40; + if (ss != NULL) { + for (r = 0; r < 4; ++r) { + pci_write_config_dword(hwif->pci_dev, basereg + (r<<2), ss->regs[r]); + } + } } - return 0; } #endif diff --git a/trunk/drivers/ide/pci/scc_pata.c b/trunk/drivers/ide/pci/scc_pata.c index 24a85bbcd2a6..ebb7132b9b84 100644 --- a/trunk/drivers/ide/pci/scc_pata.c +++ b/trunk/drivers/ide/pci/scc_pata.c @@ -254,7 +254,19 @@ static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed) offset = 0; /* 100MHz */ } - idx = speed - XFER_UDMA_0; + switch (speed) { + case XFER_UDMA_6: + case XFER_UDMA_5: + case XFER_UDMA_4: + case XFER_UDMA_3: + case XFER_UDMA_2: + case XFER_UDMA_1: + case XFER_UDMA_0: + idx = speed - XFER_UDMA_0; + break; + default: + return; + } jcactsel = JCACTSELtbl[offset][idx]; if (is_slave) { diff --git a/trunk/drivers/ide/pci/serverworks.c b/trunk/drivers/ide/pci/serverworks.c index e9bd269547bb..a7280311357b 100644 --- a/trunk/drivers/ide/pci/serverworks.c +++ b/trunk/drivers/ide/pci/serverworks.c @@ -366,17 +366,12 @@ static void __devinit init_hwif_svwks (ide_hwif_t *hwif) } } -#define IDE_HFLAGS_SVWKS \ - (IDE_HFLAG_LEGACY_IRQS | \ - IDE_HFLAG_ABUSE_SET_DMA_MODE | \ - IDE_HFLAG_BOOTABLE) - static const struct ide_port_info serverworks_chipsets[] __devinitdata = { { /* 0 */ .name = "SvrWks OSB4", .init_chipset = init_chipset_svwks, .init_hwif = init_hwif_svwks, - .host_flags = IDE_HFLAGS_SVWKS, + .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = 0x00, /* UDMA is problematic on OSB4 */ @@ -384,7 +379,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = { .name = "SvrWks CSB5", .init_chipset = init_chipset_svwks, .init_hwif = init_hwif_svwks, - .host_flags = IDE_HFLAGS_SVWKS, + .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, @@ -392,7 +387,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = { .name = "SvrWks CSB6", .init_chipset = init_chipset_svwks, .init_hwif = init_hwif_svwks, - .host_flags = IDE_HFLAGS_SVWKS, + .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_BOOTABLE, .pio_mask = 
ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, @@ -400,7 +395,8 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = { .name = "SvrWks CSB6", .init_chipset = init_chipset_svwks, .init_hwif = init_hwif_svwks, - .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE, + .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_SINGLE | + IDE_HFLAG_BOOTABLE, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, @@ -408,7 +404,8 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = { .name = "SvrWks HT1000", .init_chipset = init_chipset_svwks, .init_hwif = init_hwif_svwks, - .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE, + .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_SINGLE | + IDE_HFLAG_BOOTABLE, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA5, diff --git a/trunk/drivers/ide/pci/sgiioc4.c b/trunk/drivers/ide/pci/sgiioc4.c index 7e9dade5648d..de820aa58cd0 100644 --- a/trunk/drivers/ide/pci/sgiioc4.c +++ b/trunk/drivers/ide/pci/sgiioc4.c @@ -582,6 +582,7 @@ ide_init_sgiioc4(ide_hwif_t * hwif) hwif->pre_reset = NULL; /* No HBA specific pre_set needed */ hwif->resetproc = &sgiioc4_resetproc;/* Reset DMA engine, clear interrupts */ + hwif->intrproc = NULL; /* Enable or Disable interrupt from drive */ hwif->maskproc = &sgiioc4_maskproc; /* Mask on/off NIEN register */ hwif->quirkproc = NULL; hwif->busproc = NULL; diff --git a/trunk/drivers/ide/pci/siimage.c b/trunk/drivers/ide/pci/siimage.c index 7b45eaf5afd9..5709c252543b 100644 --- a/trunk/drivers/ide/pci/siimage.c +++ b/trunk/drivers/ide/pci/siimage.c @@ -278,14 +278,27 @@ static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed) scsc = is_sata(hwif) ? 1 : scsc; - if (speed >= XFER_UDMA_0) { - multi = dma[2]; - ultra |= (scsc ? ultra6[speed - XFER_UDMA_0] : - ultra5[speed - XFER_UDMA_0]); - mode |= (unit ? 0x30 : 0x03); - } else { - multi = dma[speed - XFER_MW_DMA_0]; - mode |= (unit ? 0x20 : 0x02); + switch(speed) { + case XFER_MW_DMA_2: + case XFER_MW_DMA_1: + case XFER_MW_DMA_0: + multi = dma[speed - XFER_MW_DMA_0]; + mode |= ((unit) ? 0x20 : 0x02); + break; + case XFER_UDMA_6: + case XFER_UDMA_5: + case XFER_UDMA_4: + case XFER_UDMA_3: + case XFER_UDMA_2: + case XFER_UDMA_1: + case XFER_UDMA_0: + multi = dma[2]; + ultra |= ((scsc) ? (ultra6[speed - XFER_UDMA_0]) : + (ultra5[speed - XFER_UDMA_0])); + mode |= ((unit) ? 0x30 : 0x03); + break; + default: + return; } if (hwif->mmio) { diff --git a/trunk/drivers/ide/pci/sis5513.c b/trunk/drivers/ide/pci/sis5513.c index 85d36996e6af..d90b42917775 100644 --- a/trunk/drivers/ide/pci/sis5513.c +++ b/trunk/drivers/ide/pci/sis5513.c @@ -305,56 +305,59 @@ static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio) sis_program_timings(drive, XFER_PIO_0 + pio); } -static void sis_ata133_program_udma_timings(ide_drive_t *drive, const u8 mode) -{ - struct pci_dev *dev = drive->hwif->pci_dev; - u32 regdw = 0; - u8 drive_pci = sis_ata133_get_base(drive), clk, idx; - - pci_read_config_dword(dev, drive_pci, ®dw); - - regdw |= 0x04; - regdw &= 0xfffff00f; - /* check if ATA133 enable */ - clk = (regdw & 0x08) ? 
ATA_133 : ATA_100; - idx = mode - XFER_UDMA_0; - regdw |= cycle_time_value[clk][idx] << 4; - regdw |= cvs_time_value[clk][idx] << 8; - - pci_write_config_dword(dev, drive_pci, regdw); -} - -static void sis_ata33_program_udma_timings(ide_drive_t *drive, const u8 mode) -{ - struct pci_dev *dev = drive->hwif->pci_dev; - u8 drive_pci = 0x40 + drive->dn * 2, reg = 0, i = chipset_family; - - pci_read_config_byte(dev, drive_pci + 1, ®); - - /* force the UDMA bit on if we want to use UDMA */ - reg |= 0x80; - /* clean reg cycle time bits */ - reg &= ~((0xff >> (8 - cycle_time_range[i])) << cycle_time_offset[i]); - /* set reg cycle time bits */ - reg |= cycle_time_value[i][mode - XFER_UDMA_0] << cycle_time_offset[i]; - - pci_write_config_byte(dev, drive_pci + 1, reg); -} - -static void sis_program_udma_timings(ide_drive_t *drive, const u8 mode) -{ - if (chipset_family >= ATA_133) /* ATA_133 */ - sis_ata133_program_udma_timings(drive, mode); - else /* ATA_33/66/100a/100/133a */ - sis_ata33_program_udma_timings(drive, mode); -} - static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed) { - if (speed >= XFER_UDMA_0) - sis_program_udma_timings(drive, speed); - else - sis_program_timings(drive, speed); + ide_hwif_t *hwif = HWIF(drive); + struct pci_dev *dev = hwif->pci_dev; + + /* Config chip for mode */ + switch(speed) { + case XFER_UDMA_6: + case XFER_UDMA_5: + case XFER_UDMA_4: + case XFER_UDMA_3: + case XFER_UDMA_2: + case XFER_UDMA_1: + case XFER_UDMA_0: + if (chipset_family >= ATA_133) { + u32 regdw = 0; + u8 drive_pci = sis_ata133_get_base(drive); + + pci_read_config_dword(dev, drive_pci, ®dw); + regdw |= 0x04; + regdw &= 0xfffff00f; + /* check if ATA133 enable */ + if (regdw & 0x08) { + regdw |= (unsigned long)cycle_time_value[ATA_133][speed-XFER_UDMA_0] << 4; + regdw |= (unsigned long)cvs_time_value[ATA_133][speed-XFER_UDMA_0] << 8; + } else { + regdw |= (unsigned long)cycle_time_value[ATA_100][speed-XFER_UDMA_0] << 4; + regdw |= (unsigned long)cvs_time_value[ATA_100][speed-XFER_UDMA_0] << 8; + } + pci_write_config_dword(dev, (unsigned long)drive_pci, regdw); + } else { + u8 drive_pci = 0x40 + drive->dn * 2, reg = 0; + + pci_read_config_byte(dev, drive_pci+1, ®); + /* Force the UDMA bit on if we want to use UDMA */ + reg |= 0x80; + /* clean reg cycle time bits */ + reg &= ~((0xFF >> (8 - cycle_time_range[chipset_family])) + << cycle_time_offset[chipset_family]); + /* set reg cycle time bits */ + reg |= cycle_time_value[chipset_family][speed-XFER_UDMA_0] + << cycle_time_offset[chipset_family]; + pci_write_config_byte(dev, drive_pci+1, reg); + } + break; + case XFER_MW_DMA_2: + case XFER_MW_DMA_1: + case XFER_MW_DMA_0: + sis_program_timings(drive, speed); + break; + default: + break; + } } static u8 sis5513_ata133_udma_filter(ide_drive_t *drive) diff --git a/trunk/drivers/ide/pci/sl82c105.c b/trunk/drivers/ide/pci/sl82c105.c index 069f104fdcea..147d783f7529 100644 --- a/trunk/drivers/ide/pci/sl82c105.c +++ b/trunk/drivers/ide/pci/sl82c105.c @@ -115,24 +115,32 @@ static void sl82c105_set_dma_mode(ide_drive_t *drive, const u8 speed) DBG(("sl82c105_tune_chipset(drive:%s, speed:%s)\n", drive->name, ide_xfer_verbose(speed))); - drv_ctrl = mwdma_timings[speed - XFER_MW_DMA_0]; + switch (speed) { + case XFER_MW_DMA_2: + case XFER_MW_DMA_1: + case XFER_MW_DMA_0: + drv_ctrl = mwdma_timings[speed - XFER_MW_DMA_0]; - /* - * Store the DMA timings so that we can actually program - * them when DMA will be turned on... 
- */ - drive->drive_data &= 0x0000ffff; - drive->drive_data |= (unsigned long)drv_ctrl << 16; - - /* - * If we are already using DMA, we just reprogram - * the drive control register. - */ - if (drive->using_dma) { - struct pci_dev *dev = HWIF(drive)->pci_dev; - int reg = 0x44 + drive->dn * 4; + /* + * Store the DMA timings so that we can actually program + * them when DMA will be turned on... + */ + drive->drive_data &= 0x0000ffff; + drive->drive_data |= (unsigned long)drv_ctrl << 16; - pci_write_config_word(dev, reg, drv_ctrl); + /* + * If we are already using DMA, we just reprogram + * the drive control register. + */ + if (drive->using_dma) { + struct pci_dev *dev = HWIF(drive)->pci_dev; + int reg = 0x44 + drive->dn * 4; + + pci_write_config_word(dev, reg, drv_ctrl); + } + break; + default: + return; } } diff --git a/trunk/drivers/ide/pci/slc90e66.c b/trunk/drivers/ide/pci/slc90e66.c index dbbb46819a2d..eb4445b229ed 100644 --- a/trunk/drivers/ide/pci/slc90e66.c +++ b/trunk/drivers/ide/pci/slc90e66.c @@ -91,9 +91,19 @@ static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed) pci_read_config_word(dev, 0x48, ®48); pci_read_config_word(dev, 0x4a, ®4a); - if (speed >= XFER_UDMA_0) { - u_speed = (speed - XFER_UDMA_0) << (drive->dn * 4); + switch(speed) { + case XFER_UDMA_4: u_speed = 4 << (drive->dn * 4); break; + case XFER_UDMA_3: u_speed = 3 << (drive->dn * 4); break; + case XFER_UDMA_2: u_speed = 2 << (drive->dn * 4); break; + case XFER_UDMA_1: u_speed = 1 << (drive->dn * 4); break; + case XFER_UDMA_0: u_speed = 0 << (drive->dn * 4); break; + case XFER_MW_DMA_2: + case XFER_MW_DMA_1: + case XFER_SW_DMA_2: break; + default: return; + } + if (speed >= XFER_UDMA_0) { if (!(reg48 & u_flag)) pci_write_config_word(dev, 0x48, reg48|u_flag); /* FIXME: (reg4a & a_speed) ? 
*/ diff --git a/trunk/drivers/ide/pci/tc86c001.c b/trunk/drivers/ide/pci/tc86c001.c index e1faf6c2fe16..a66ebd14664e 100644 --- a/trunk/drivers/ide/pci/tc86c001.c +++ b/trunk/drivers/ide/pci/tc86c001.c @@ -222,8 +222,7 @@ static const struct ide_port_info tc86c001_chipset __devinitdata = { .name = "TC86C001", .init_chipset = init_chipset_tc86c001, .init_hwif = init_hwif_tc86c001, - .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD | - IDE_HFLAG_ABUSE_SET_DMA_MODE, + .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA4, diff --git a/trunk/drivers/ide/pci/triflex.c b/trunk/drivers/ide/pci/triflex.c index ae52a96a1cf9..a227c41d23a3 100644 --- a/trunk/drivers/ide/pci/triflex.c +++ b/trunk/drivers/ide/pci/triflex.c @@ -81,6 +81,8 @@ static void triflex_set_mode(ide_drive_t *drive, const u8 speed) case XFER_PIO_0: timing = 0x0808; break; + default: + return; } triflex_timings &= ~(0xFFFF << (16 * unit)); diff --git a/trunk/drivers/ide/pci/via82cxxx.c b/trunk/drivers/ide/pci/via82cxxx.c index 4b32c90f4896..a0d3c16b68ec 100644 --- a/trunk/drivers/ide/pci/via82cxxx.c +++ b/trunk/drivers/ide/pci/via82cxxx.c @@ -439,7 +439,6 @@ static const struct ide_port_info via82cxxx_chipset __devinitdata = { .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } }, .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST | IDE_HFLAG_PIO_NO_DOWNGRADE | - IDE_HFLAG_ABUSE_SET_DMA_MODE | IDE_HFLAG_POST_SET_MODE | IDE_HFLAG_IO_32BIT | IDE_HFLAG_BOOTABLE, diff --git a/trunk/drivers/ide/ppc/pmac.c b/trunk/drivers/ide/ppc/pmac.c index 3dce80092fff..7f7a59885777 100644 --- a/trunk/drivers/ide/ppc/pmac.c +++ b/trunk/drivers/ide/ppc/pmac.c @@ -438,8 +438,13 @@ pmac_ide_init_hwif_ports(hw_regs_t *hw, if (data_port == pmac_ide[ix].regbase) break; - if (ix >= MAX_HWIFS) - return; /* not an IDE PMAC interface */ + if (ix >= MAX_HWIFS) { + /* Probably a PCI interface... 
*/ + for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; ++i) + hw->io_ports[i] = data_port + i - IDE_DATA_OFFSET; + hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port; + return; + } for (i = 0; i < 8; ++i) hw->io_ports[i] = data_port + i * 0x10; @@ -828,20 +833,38 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed) tl[0] = *timings; tl[1] = *timings2; + switch(speed) { #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC - if (speed >= XFER_UDMA_0) { - if (pmif->kind == controller_kl_ata4) - ret = set_timings_udma_ata4(&tl[0], speed); - else if (pmif->kind == controller_un_ata6 - || pmif->kind == controller_k2_ata6) - ret = set_timings_udma_ata6(&tl[0], &tl[1], speed); - else if (pmif->kind == controller_sh_ata6) - ret = set_timings_udma_shasta(&tl[0], &tl[1], speed); - else - ret = -1; - } else - set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed); + case XFER_UDMA_6: + case XFER_UDMA_5: + case XFER_UDMA_4: + case XFER_UDMA_3: + case XFER_UDMA_2: + case XFER_UDMA_1: + case XFER_UDMA_0: + if (pmif->kind == controller_kl_ata4) + ret = set_timings_udma_ata4(&tl[0], speed); + else if (pmif->kind == controller_un_ata6 + || pmif->kind == controller_k2_ata6) + ret = set_timings_udma_ata6(&tl[0], &tl[1], speed); + else if (pmif->kind == controller_sh_ata6) + ret = set_timings_udma_shasta(&tl[0], &tl[1], speed); + else + ret = 1; + break; + case XFER_MW_DMA_2: + case XFER_MW_DMA_1: + case XFER_MW_DMA_0: + set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed); + break; + case XFER_SW_DMA_2: + case XFER_SW_DMA_1: + case XFER_SW_DMA_0: + return; #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */ + default: + ret = 1; + } if (ret) return; diff --git a/trunk/drivers/lguest/x86/core.c b/trunk/drivers/lguest/x86/core.c index 96d0fd07c57d..482aec2a9631 100644 --- a/trunk/drivers/lguest/x86/core.c +++ b/trunk/drivers/lguest/x86/core.c @@ -459,7 +459,7 @@ void __init lguest_arch_host_init(void) /* We don't need the complexity of CPUs coming and going while we're * doing this. */ - get_online_cpus(); + lock_cpu_hotplug(); if (cpu_has_pge) { /* We have a broader idea of "global". */ /* Remember that this was originally set (for cleanup). */ cpu_had_pge = 1; @@ -469,20 +469,20 @@ void __init lguest_arch_host_init(void) /* Turn off the feature in the global feature set. */ clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); } - put_online_cpus(); + unlock_cpu_hotplug(); }; /*:*/ void __exit lguest_arch_host_fini(void) { /* If we had PGE before we started, turn it back on now. */ - get_online_cpus(); + lock_cpu_hotplug(); if (cpu_had_pge) { set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); /* adjust_pge's argument "1" means set PGE. */ on_each_cpu(adjust_pge, (void *)1, 0, 1); } - put_online_cpus(); + unlock_cpu_hotplug(); } diff --git a/trunk/drivers/media/dvb/dvb-core/dvb_frontend.h b/trunk/drivers/media/dvb/dvb-core/dvb_frontend.h index a5262e852c82..c49fa049717e 100644 --- a/trunk/drivers/media/dvb/dvb-core/dvb_frontend.h +++ b/trunk/drivers/media/dvb/dvb-core/dvb_frontend.h @@ -84,6 +84,9 @@ struct dvb_tuner_ops { /** This is support for demods like the mt352 - fills out the supplied buffer with what to write. 
*/ int (*calc_regs)(struct dvb_frontend *fe, struct dvb_frontend_parameters *p, u8 *buf, int buf_len); + /** This is to allow setting tuner-specific configs */ + int (*set_config)(struct dvb_frontend *fe, void *priv_cfg); + int (*get_frequency)(struct dvb_frontend *fe, u32 *frequency); int (*get_bandwidth)(struct dvb_frontend *fe, u32 *bandwidth); diff --git a/trunk/drivers/media/video/bt8xx/bttv-cards.c b/trunk/drivers/media/video/bt8xx/bttv-cards.c index 585d1ef95afd..4f91f98db37b 100644 --- a/trunk/drivers/media/video/bt8xx/bttv-cards.c +++ b/trunk/drivers/media/video/bt8xx/bttv-cards.c @@ -3574,8 +3574,12 @@ void __devinit bttv_init_card2(struct bttv *btv) } if (btv->tda9887_conf) { - bttv_call_i2c_clients(btv, TDA9887_SET_CONFIG, - &btv->tda9887_conf); + struct v4l2_priv_tun_config tda9887_cfg; + + tda9887_cfg.tuner = TUNER_TDA9887; + tda9887_cfg.priv = &btv->tda9887_conf; + + bttv_call_i2c_clients(btv, TUNER_SET_CONFIG, &tda9887_cfg); } btv->svhs = bttv_tvcards[btv->c.type].svhs; diff --git a/trunk/drivers/media/video/cx88/cx88-i2c.c b/trunk/drivers/media/video/cx88/cx88-i2c.c index c8b1c50625f4..937497c86247 100644 --- a/trunk/drivers/media/video/cx88/cx88-i2c.c +++ b/trunk/drivers/media/video/cx88/cx88-i2c.c @@ -127,8 +127,14 @@ static int attach_inform(struct i2c_client *client) } } - if (core->board.tda9887_conf) - client->driver->command(client, TDA9887_SET_CONFIG, &core->board.tda9887_conf); + if (core->board.tda9887_conf) { + struct v4l2_priv_tun_config tda9887_cfg; + + tda9887_cfg.tuner = TUNER_TDA9887; + tda9887_cfg.priv = &core->board.tda9887_conf; + + client->driver->command(client, TUNER_SET_CONFIG, &tda9887_cfg); + } return 0; } diff --git a/trunk/drivers/media/video/em28xx/em28xx-i2c.c b/trunk/drivers/media/video/em28xx/em28xx-i2c.c index e3a4aa7a9df4..e48191fb1dde 100644 --- a/trunk/drivers/media/video/em28xx/em28xx-i2c.c +++ b/trunk/drivers/media/video/em28xx/em28xx-i2c.c @@ -421,6 +421,8 @@ static int attach_inform(struct i2c_client *client) case 0x96: case 0x94: { + struct v4l2_priv_tun_config tda9887_cfg; + struct tuner_setup tun_setup; tun_setup.mode_mask = T_ANALOG_TV | T_RADIO; @@ -428,7 +430,11 @@ static int attach_inform(struct i2c_client *client) tun_setup.addr = client->addr; em28xx_i2c_call_clients(dev, TUNER_SET_TYPE_ADDR, &tun_setup); - em28xx_i2c_call_clients(dev, TDA9887_SET_CONFIG, &dev->tda9887_conf); + + tda9887_cfg.tuner = TUNER_TDA9887; + tda9887_cfg.priv = &dev->tda9887_conf; + em28xx_i2c_call_clients(dev, TUNER_SET_CONFIG, + &tda9887_cfg); break; } case 0x42: diff --git a/trunk/drivers/media/video/em28xx/em28xx-video.c b/trunk/drivers/media/video/em28xx/em28xx-video.c index 0906bc5766cc..bb0933f9d84c 100644 --- a/trunk/drivers/media/video/em28xx/em28xx-video.c +++ b/trunk/drivers/media/video/em28xx/em28xx-video.c @@ -186,11 +186,6 @@ static void em28xx_config_i2c(struct em28xx *dev) f.frequency = 9076; /* FIXME:remove magic number */ dev->ctl_freq = f.frequency; em28xx_i2c_call_clients(dev, VIDIOC_S_FREQUENCY, &f); - - /* configure tda9887 */ - - -/* em28xx_i2c_call_clients(dev,VIDIOC_S_STD,&dev->tvnorm->id); */ } /* diff --git a/trunk/drivers/media/video/saa7134/saa7134-cards.c b/trunk/drivers/media/video/saa7134/saa7134-cards.c index 98c1b084a716..78d7d7059a9e 100644 --- a/trunk/drivers/media/video/saa7134/saa7134-cards.c +++ b/trunk/drivers/media/video/saa7134/saa7134-cards.c @@ -4570,8 +4570,17 @@ int saa7134_board_init2(struct saa7134_dev *dev) printk(KERN_INFO "%s Tuner type is %d\n", dev->name, dev->tuner_type); if (dev->tuner_type == 
TUNER_PHILIPS_FMD1216ME_MK3) { - dev->tda9887_conf = TDA9887_PRESENT | TDA9887_PORT1_ACTIVE | TDA9887_PORT2_ACTIVE; - saa7134_i2c_call_clients(dev,TDA9887_SET_CONFIG, &dev->tda9887_conf); + struct v4l2_priv_tun_config tda9887_cfg; + + tda9887_cfg.tuner = TUNER_TDA9887; + tda9887_cfg.priv = &dev->tda9887_conf; + + dev->tda9887_conf = TDA9887_PRESENT | + TDA9887_PORT1_ACTIVE | + TDA9887_PORT2_ACTIVE; + + saa7134_i2c_call_clients(dev, TUNER_SET_CONFIG, + &tda9887_cfg); } tun_setup.mode_mask = T_RADIO | T_ANALOG_TV | T_DIGITAL_TV; diff --git a/trunk/drivers/media/video/saa7134/saa7134-dvb.c b/trunk/drivers/media/video/saa7134/saa7134-dvb.c index e1ab099ec4c6..a9ca5730826f 100644 --- a/trunk/drivers/media/video/saa7134/saa7134-dvb.c +++ b/trunk/drivers/media/video/saa7134/saa7134-dvb.c @@ -1073,14 +1073,21 @@ static int dvb_init(struct saa7134_dev *dev) static int dvb_fini(struct saa7134_dev *dev) { - static int on = TDA9887_PRESENT | TDA9887_PORT2_INACTIVE; + /* FIXME: I suspect that this code is bogus, since the entry for + Pinnacle 300I DVB-T PAL already defines the proper init to allow + the detection of mt2032 (TDA9887_PORT2_INACTIVE) + */ + if (dev->board == SAA7134_BOARD_PINNACLE_300I_DVBT_PAL) { + struct v4l2_priv_tun_config tda9887_cfg; + static int on = TDA9887_PRESENT | TDA9887_PORT2_INACTIVE; + + tda9887_cfg.tuner = TUNER_TDA9887; + tda9887_cfg.priv = &on; - switch (dev->board) { - case SAA7134_BOARD_PINNACLE_300I_DVBT_PAL: /* otherwise we don't detect the tuner on next insmod */ - saa7134_i2c_call_clients(dev,TDA9887_SET_CONFIG,&on); - break; - }; + saa7134_i2c_call_clients(dev, TUNER_SET_CONFIG, &tda9887_cfg); + } + videobuf_dvb_unregister(&dev->dvb); return 0; } diff --git a/trunk/drivers/media/video/saa7134/saa7134-i2c.c b/trunk/drivers/media/video/saa7134/saa7134-i2c.c index 6deaad1a5480..800b397f8f16 100644 --- a/trunk/drivers/media/video/saa7134/saa7134-i2c.c +++ b/trunk/drivers/media/video/saa7134/saa7134-i2c.c @@ -323,7 +323,6 @@ static int attach_inform(struct i2c_client *client) { struct saa7134_dev *dev = client->adapter->algo_data; int tuner = dev->tuner_type; - int conf = dev->tda9887_conf; struct tuner_setup tun_setup; d1printk( "%s i2c attach [addr=0x%x,client=%s]\n", @@ -360,7 +359,6 @@ static int attach_inform(struct i2c_client *client) } if (tuner != UNSET) { - tun_setup.type = tuner; tun_setup.addr = saa7134_boards[dev->board].tuner_addr; tun_setup.config = saa7134_boards[dev->board].tuner_config; @@ -372,9 +370,18 @@ static int attach_inform(struct i2c_client *client) client->driver->command(client,TUNER_SET_TYPE_ADDR, &tun_setup); } + + if (tuner == TUNER_TDA9887) { + struct v4l2_priv_tun_config tda9887_cfg; + + tda9887_cfg.tuner = TUNER_TDA9887; + tda9887_cfg.priv = &dev->tda9887_conf; + + client->driver->command(client, TUNER_SET_CONFIG, + &tda9887_cfg); + } } - client->driver->command(client, TDA9887_SET_CONFIG, &conf); return 0; } diff --git a/trunk/drivers/media/video/saa7134/saa7134-video.c b/trunk/drivers/media/video/saa7134/saa7134-video.c index 6396d9b5c063..2c1d56ab2f2a 100644 --- a/trunk/drivers/media/video/saa7134/saa7134-video.c +++ b/trunk/drivers/media/video/saa7134/saa7134-video.c @@ -1236,16 +1236,24 @@ static int set_control(struct saa7134_dev *dev, struct saa7134_fh *fh, restart_overlay = 1; break; case V4L2_CID_PRIVATE_AUTOMUTE: + { + struct v4l2_priv_tun_config tda9887_cfg; + + tda9887_cfg.tuner = TUNER_TDA9887; + tda9887_cfg.priv = &dev->tda9887_conf; + dev->ctl_automute = c->value; if (dev->tda9887_conf) { if (dev->ctl_automute) 
dev->tda9887_conf |= TDA9887_AUTOMUTE; else dev->tda9887_conf &= ~TDA9887_AUTOMUTE; - saa7134_i2c_call_clients(dev, TDA9887_SET_CONFIG, - &dev->tda9887_conf); + + saa7134_i2c_call_clients(dev, TUNER_SET_CONFIG, + &tda9887_cfg); } break; + } default: return -EINVAL; } diff --git a/trunk/drivers/media/video/tuner-core.c b/trunk/drivers/media/video/tuner-core.c index 9e99f3636d3d..d1d6c664bb09 100644 --- a/trunk/drivers/media/video/tuner-core.c +++ b/trunk/drivers/media/video/tuner-core.c @@ -889,14 +889,29 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg) return 0; } #endif - case TDA9887_SET_CONFIG: - if (t->type == TUNER_TDA9887) { - int *i = arg; + case TUNER_SET_CONFIG: + { + struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops; + struct v4l2_priv_tun_config *cfg = arg; + + if (t->type != cfg->tuner) + break; - t->tda9887_config = *i; + if (t->type == TUNER_TDA9887) { + t->tda9887_config = *(unsigned int *)cfg->priv; set_freq(client, t->tv_freq); + break; } + + if (NULL == fe_tuner_ops->set_config) { + tuner_warn("Tuner frontend module has no way to " + "set config\n"); + break; + } + fe_tuner_ops->set_config(&t->fe, cfg->priv); + break; + } /* --- v4l ioctls --- */ /* take care: bttv does userspace copying, we'll get a kernel pointer here... */ diff --git a/trunk/drivers/media/video/tuner-simple.c b/trunk/drivers/media/video/tuner-simple.c index 7b93d3b1f4c6..eec13cb6cd6f 100644 --- a/trunk/drivers/media/video/tuner-simple.c +++ b/trunk/drivers/media/video/tuner-simple.c @@ -355,10 +355,14 @@ static int simple_set_tv_freq(struct dvb_frontend *fe, } priv->last_div = div; if (t_params->has_tda9887) { + struct v4l2_priv_tun_config tda9887_cfg; int config = 0; int is_secam_l = (params->std & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC)) && !(params->std & ~(V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC)); + tda9887_cfg.tuner = TUNER_TDA9887; + tda9887_cfg.priv = &config; + if (params->std == V4L2_STD_SECAM_LC) { if (t_params->port1_active ^ t_params->port1_invert_for_secam_lc) config |= TDA9887_PORT1_ACTIVE; @@ -391,7 +395,8 @@ static int simple_set_tv_freq(struct dvb_frontend *fe, } if (t_params->default_pll_gating_18) config |= TDA9887_GATING_18; - i2c_clients_command(priv->i2c_props.adap, TDA9887_SET_CONFIG, &config); + i2c_clients_command(priv->i2c_props.adap, TUNER_SET_CONFIG, + &tda9887_cfg); } tuner_dbg("tv 0x%02x 0x%02x 0x%02x 0x%02x\n", buffer[0],buffer[1],buffer[2],buffer[3]); @@ -534,6 +539,11 @@ static int simple_set_radio_freq(struct dvb_frontend *fe, if (t_params->has_tda9887) { int config = 0; + struct v4l2_priv_tun_config tda9887_cfg; + + tda9887_cfg.tuner = TUNER_TDA9887; + tda9887_cfg.priv = &config; + if (t_params->port1_active && !t_params->port1_fm_high_sensitivity) config |= TDA9887_PORT1_ACTIVE; if (t_params->port2_active && !t_params->port2_fm_high_sensitivity) @@ -546,7 +556,8 @@ static int simple_set_radio_freq(struct dvb_frontend *fe, config |= TDA9887_GAIN_NORMAL; if (t_params->radio_if == 2) config |= TDA9887_RIF_41_3; - i2c_clients_command(priv->i2c_props.adap, TDA9887_SET_CONFIG, &config); + i2c_clients_command(priv->i2c_props.adap, TUNER_SET_CONFIG, + &tda9887_cfg); } if (4 != (rc = tuner_i2c_xfer_send(&priv->i2c_props,buffer,4))) tuner_warn("i2c i/o error: rc == %d (should be 4)\n",rc); diff --git a/trunk/drivers/media/video/v4l2-common.c b/trunk/drivers/media/video/v4l2-common.c index 1141b4bf41ce..a4d68d3ba5e4 100644 --- a/trunk/drivers/media/video/v4l2-common.c +++ b/trunk/drivers/media/video/v4l2-common.c @@ -400,7 +400,7 @@ 
static const char *v4l2_int_ioctls[] = { [_IOC_NR(TUNER_SET_TYPE_ADDR)] = "TUNER_SET_TYPE_ADDR", [_IOC_NR(TUNER_SET_STANDBY)] = "TUNER_SET_STANDBY", - [_IOC_NR(TDA9887_SET_CONFIG)] = "TDA9887_SET_CONFIG", + [_IOC_NR(TUNER_SET_CONFIG)] = "TUNER_SET_CONFIG", [_IOC_NR(VIDIOC_INT_S_TUNER_MODE)] = "VIDIOC_INT_S_TUNER_MODE", [_IOC_NR(VIDIOC_INT_RESET)] = "VIDIOC_INT_RESET", diff --git a/trunk/drivers/s390/char/sclp_config.c b/trunk/drivers/s390/char/sclp_config.c index 9dc77f14fa52..5322e5e54a98 100644 --- a/trunk/drivers/s390/char/sclp_config.c +++ b/trunk/drivers/s390/char/sclp_config.c @@ -29,12 +29,12 @@ static void sclp_cpu_capability_notify(struct work_struct *work) struct sys_device *sysdev; printk(KERN_WARNING TAG "cpu capability changed.\n"); - get_online_cpus(); + lock_cpu_hotplug(); for_each_online_cpu(cpu) { sysdev = get_cpu_sysdev(cpu); kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); } - put_online_cpus(); + unlock_cpu_hotplug(); } static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) diff --git a/trunk/drivers/scsi/ide-scsi.c b/trunk/drivers/scsi/ide-scsi.c index 02e91893064d..9706de9d98d5 100644 --- a/trunk/drivers/scsi/ide-scsi.c +++ b/trunk/drivers/scsi/ide-scsi.c @@ -395,12 +395,14 @@ static int idescsi_expiry(ide_drive_t *drive) static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive) { idescsi_scsi_t *scsi = drive_to_idescsi(drive); - ide_hwif_t *hwif = drive->hwif; - idescsi_pc_t *pc = scsi->pc; + idescsi_pc_t *pc=scsi->pc; struct request *rq = pc->rq; + atapi_bcount_t bcount; + atapi_status_t status; + atapi_ireason_t ireason; + atapi_feature_t feature; + unsigned int temp; - u16 bcount; - u8 stat, ireason; #if IDESCSI_DEBUG_LOG printk (KERN_INFO "ide-scsi: Reached idescsi_pc_intr interrupt handler\n"); @@ -423,29 +425,30 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive) (void) HWIF(drive)->ide_dma_end(drive); } + feature.all = 0; /* Clear the interrupt */ - stat = drive->hwif->INB(IDE_STATUS_REG); + status.all = HWIF(drive)->INB(IDE_STATUS_REG); - if ((stat & DRQ_STAT) == 0) { + if (!status.b.drq) { /* No more interrupts */ if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) printk (KERN_INFO "Packet command completed, %d bytes transferred\n", pc->actually_transferred); local_irq_enable_in_hardirq(); - if (stat & ERR_STAT) + if (status.b.check) rq->errors++; idescsi_end_request (drive, 1, 0); return ide_stopped; } - bcount = (hwif->INB(IDE_BCOUNTH_REG) << 8) | - hwif->INB(IDE_BCOUNTL_REG); - ireason = hwif->INB(IDE_IREASON_REG); + bcount.b.low = HWIF(drive)->INB(IDE_BCOUNTL_REG); + bcount.b.high = HWIF(drive)->INB(IDE_BCOUNTH_REG); + ireason.all = HWIF(drive)->INB(IDE_IREASON_REG); - if (ireason & CD) { + if (ireason.b.cod) { printk(KERN_ERR "ide-scsi: CoD != 0 in idescsi_pc_intr\n"); return ide_do_reset (drive); } - if (ireason & IO) { - temp = pc->actually_transferred + bcount; + if (ireason.b.io) { + temp = pc->actually_transferred + bcount.all; if (temp > pc->request_transfer) { if (temp > pc->buffer_size) { printk(KERN_ERR "ide-scsi: The scsi wants to " @@ -458,13 +461,11 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive) idescsi_input_buffers(drive, pc, temp); else drive->hwif->atapi_input_bytes(drive, pc->current_position, temp); - printk(KERN_ERR "ide-scsi: transferred" - " %d of %d bytes\n", - temp, bcount); + printk(KERN_ERR "ide-scsi: transferred %d of %d bytes\n", temp, bcount.all); } pc->actually_transferred += temp; pc->current_position += temp; - idescsi_discard_data(drive, bcount - temp); + idescsi_discard_data(drive, bcount.all - 
temp); ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry); return ide_started; } @@ -473,24 +474,22 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive) #endif /* IDESCSI_DEBUG_LOG */ } } - if (ireason & IO) { + if (ireason.b.io) { clear_bit(PC_WRITING, &pc->flags); if (pc->sg) - idescsi_input_buffers(drive, pc, bcount); + idescsi_input_buffers(drive, pc, bcount.all); else - hwif->atapi_input_bytes(drive, pc->current_position, - bcount); + HWIF(drive)->atapi_input_bytes(drive, pc->current_position, bcount.all); } else { set_bit(PC_WRITING, &pc->flags); if (pc->sg) - idescsi_output_buffers(drive, pc, bcount); + idescsi_output_buffers (drive, pc, bcount.all); else - hwif->atapi_output_bytes(drive, pc->current_position, - bcount); + HWIF(drive)->atapi_output_bytes(drive, pc->current_position, bcount.all); } /* Update the current position */ - pc->actually_transferred += bcount; - pc->current_position += bcount; + pc->actually_transferred += bcount.all; + pc->current_position += bcount.all; /* And set the interrupt handler again */ ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry); @@ -502,16 +501,16 @@ static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive) ide_hwif_t *hwif = drive->hwif; idescsi_scsi_t *scsi = drive_to_idescsi(drive); idescsi_pc_t *pc = scsi->pc; + atapi_ireason_t ireason; ide_startstop_t startstop; - u8 ireason; if (ide_wait_stat(&startstop,drive,DRQ_STAT,BUSY_STAT,WAIT_READY)) { printk(KERN_ERR "ide-scsi: Strange, packet command " "initiated yet DRQ isn't asserted\n"); return startstop; } - ireason = hwif->INB(IDE_IREASON_REG); - if ((ireason & CD) == 0 || (ireason & IO)) { + ireason.all = HWIF(drive)->INB(IDE_IREASON_REG); + if (!ireason.b.cod || ireason.b.io) { printk(KERN_ERR "ide-scsi: (IO,CoD) != (0,1) while " "issuing a packet command\n"); return ide_do_reset (drive); @@ -574,26 +573,30 @@ static ide_startstop_t idescsi_issue_pc (ide_drive_t *drive, idescsi_pc_t *pc) { idescsi_scsi_t *scsi = drive_to_idescsi(drive); ide_hwif_t *hwif = drive->hwif; - u16 bcount; - u8 dma = 0; + atapi_feature_t feature; + atapi_bcount_t bcount; scsi->pc=pc; /* Set the current packet command */ pc->actually_transferred=0; /* We haven't transferred any data yet */ pc->current_position=pc->buffer; - /* Request to transfer the entire buffer at once */ - bcount = min(pc->request_transfer, 63 * 1024); + bcount.all = min(pc->request_transfer, 63 * 1024); /* Request to transfer the entire buffer at once */ + feature.all = 0; if (drive->using_dma && !idescsi_map_sg(drive, pc)) { hwif->sg_mapped = 1; - dma = !hwif->dma_setup(drive); + feature.b.dma = !hwif->dma_setup(drive); hwif->sg_mapped = 0; } SELECT_DRIVE(drive); + if (IDE_CONTROL_REG) + HWIF(drive)->OUTB(drive->ctl, IDE_CONTROL_REG); - ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK, bcount, dma); + HWIF(drive)->OUTB(feature.all, IDE_FEATURE_REG); + HWIF(drive)->OUTB(bcount.b.high, IDE_BCOUNTH_REG); + HWIF(drive)->OUTB(bcount.b.low, IDE_BCOUNTL_REG); - if (dma) + if (feature.b.dma) set_bit(PC_DMA_OK, &pc->flags); if (test_bit(IDESCSI_DRQ_INTERRUPT, &scsi->flags)) { diff --git a/trunk/fs/Kconfig b/trunk/fs/Kconfig index b4799efaf9e8..781b47d2f9f2 100644 --- a/trunk/fs/Kconfig +++ b/trunk/fs/Kconfig @@ -2130,3 +2130,4 @@ source "fs/nls/Kconfig" source "fs/dlm/Kconfig" endmenu + diff --git a/trunk/fs/proc/base.c b/trunk/fs/proc/base.c index 91fa8e6ce8ad..7411bfb0b7cc 100644 --- a/trunk/fs/proc/base.c +++ b/trunk/fs/proc/base.c @@ -310,77 +310,6 @@ static int 
proc_pid_schedstat(struct task_struct *task, char *buffer) } #endif -#ifdef CONFIG_LATENCYTOP -static int lstats_show_proc(struct seq_file *m, void *v) -{ - int i; - struct task_struct *task = m->private; - seq_puts(m, "Latency Top version : v0.1\n"); - - for (i = 0; i < 32; i++) { - if (task->latency_record[i].backtrace[0]) { - int q; - seq_printf(m, "%i %li %li ", - task->latency_record[i].count, - task->latency_record[i].time, - task->latency_record[i].max); - for (q = 0; q < LT_BACKTRACEDEPTH; q++) { - char sym[KSYM_NAME_LEN]; - char *c; - if (!task->latency_record[i].backtrace[q]) - break; - if (task->latency_record[i].backtrace[q] == ULONG_MAX) - break; - sprint_symbol(sym, task->latency_record[i].backtrace[q]); - c = strchr(sym, '+'); - if (c) - *c = 0; - seq_printf(m, "%s ", sym); - } - seq_printf(m, "\n"); - } - - } - return 0; -} - -static int lstats_open(struct inode *inode, struct file *file) -{ - int ret; - struct seq_file *m; - struct task_struct *task = get_proc_task(inode); - - ret = single_open(file, lstats_show_proc, NULL); - if (!ret) { - m = file->private_data; - m->private = task; - } - return ret; -} - -static ssize_t lstats_write(struct file *file, const char __user *buf, - size_t count, loff_t *offs) -{ - struct seq_file *m; - struct task_struct *task; - - m = file->private_data; - task = m->private; - clear_all_latency_tracing(task); - - return count; -} - -static const struct file_operations proc_lstats_operations = { - .open = lstats_open, - .read = seq_read, - .write = lstats_write, - .llseek = seq_lseek, - .release = single_release, -}; - -#endif - /* The badness from the OOM killer */ unsigned long badness(struct task_struct *p, unsigned long uptime); static int proc_oom_score(struct task_struct *task, char *buffer) @@ -1091,7 +1020,6 @@ static const struct file_operations proc_fault_inject_operations = { }; #endif - #ifdef CONFIG_SCHED_DEBUG /* * Print out various scheduling related per-task fields: @@ -2302,9 +2230,6 @@ static const struct pid_entry tgid_base_stuff[] = { #ifdef CONFIG_SCHEDSTATS INF("schedstat", S_IRUGO, pid_schedstat), #endif -#ifdef CONFIG_LATENCYTOP - REG("latency", S_IRUGO, lstats), -#endif #ifdef CONFIG_PROC_PID_CPUSET REG("cpuset", S_IRUGO, cpuset), #endif @@ -2630,9 +2555,6 @@ static const struct pid_entry tid_base_stuff[] = { #ifdef CONFIG_SCHEDSTATS INF("schedstat", S_IRUGO, pid_schedstat), #endif -#ifdef CONFIG_LATENCYTOP - REG("latency", S_IRUGO, lstats), -#endif #ifdef CONFIG_PROC_PID_CPUSET REG("cpuset", S_IRUGO, cpuset), #endif diff --git a/trunk/include/asm-cris/arch-v10/ide.h b/trunk/include/asm-cris/arch-v10/ide.h index ea34e0d0a388..78b301ed7b12 100644 --- a/trunk/include/asm-cris/arch-v10/ide.h +++ b/trunk/include/asm-cris/arch-v10/ide.h @@ -89,6 +89,11 @@ static inline void ide_init_default_hwifs(void) } } +/* some configuration options we don't need */ + +#undef SUPPORT_VLB_SYNC +#define SUPPORT_VLB_SYNC 0 + #endif /* __KERNEL__ */ #endif /* __ASMCRIS_IDE_H */ diff --git a/trunk/include/asm-cris/arch-v32/ide.h b/trunk/include/asm-cris/arch-v32/ide.h index fb9c3627a5b4..11296170d057 100644 --- a/trunk/include/asm-cris/arch-v32/ide.h +++ b/trunk/include/asm-cris/arch-v32/ide.h @@ -48,6 +48,11 @@ static inline unsigned long ide_default_io_base(int index) return REG_TYPE_CONV(unsigned long, reg_ata_rw_ctrl2, ctrl2); } +/* some configuration options we don't need */ + +#undef SUPPORT_VLB_SYNC +#define SUPPORT_VLB_SYNC 0 + #define IDE_ARCH_ACK_INTR #define ide_ack_intr(hwif) ((hwif)->ack_intr(hwif)) diff --git 
a/trunk/include/asm-frv/ide.h b/trunk/include/asm-frv/ide.h index 8c9a540d4344..f0bd2cb250c1 100644 --- a/trunk/include/asm-frv/ide.h +++ b/trunk/include/asm-frv/ide.h @@ -18,6 +18,12 @@ #include #include +#undef SUPPORT_SLOW_DATA_PORTS +#define SUPPORT_SLOW_DATA_PORTS 0 + +#undef SUPPORT_VLB_SYNC +#define SUPPORT_VLB_SYNC 0 + #ifndef MAX_HWIFS #define MAX_HWIFS 8 #endif diff --git a/trunk/include/asm-generic/resource.h b/trunk/include/asm-generic/resource.h index 587566f95f6c..a4a22cc35898 100644 --- a/trunk/include/asm-generic/resource.h +++ b/trunk/include/asm-generic/resource.h @@ -44,8 +44,8 @@ #define RLIMIT_NICE 13 /* max nice prio allowed to raise to 0-39 for nice level 19 .. -20 */ #define RLIMIT_RTPRIO 14 /* maximum realtime priority */ -#define RLIMIT_RTTIME 15 /* timeout for RT tasks in us */ -#define RLIM_NLIMITS 16 + +#define RLIM_NLIMITS 15 /* * SuS says limits have to be unsigned. @@ -86,7 +86,6 @@ [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \ [RLIMIT_NICE] = { 0, 0 }, \ [RLIMIT_RTPRIO] = { 0, 0 }, \ - [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \ } #endif /* __KERNEL__ */ diff --git a/trunk/include/asm-powerpc/ide.h b/trunk/include/asm-powerpc/ide.h index 6d50310ecaea..fd7f5a430f0a 100644 --- a/trunk/include/asm-powerpc/ide.h +++ b/trunk/include/asm-powerpc/ide.h @@ -42,6 +42,9 @@ struct ide_machdep_calls { extern struct ide_machdep_calls ppc_ide_md; +#undef SUPPORT_SLOW_DATA_PORTS +#define SUPPORT_SLOW_DATA_PORTS 0 + #define IDE_ARCH_OBSOLETE_DEFAULTS static __inline__ int ide_default_irq(unsigned long base) diff --git a/trunk/include/asm-x86/thread_info_32.h b/trunk/include/asm-x86/thread_info_32.h index ef58fd2a6eb0..22a8cbcd35e2 100644 --- a/trunk/include/asm-x86/thread_info_32.h +++ b/trunk/include/asm-x86/thread_info_32.h @@ -132,7 +132,6 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ #define TIF_SECCOMP 7 /* secure computing */ #define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */ -#define TIF_HRTICK_RESCHED 9 /* reprogram hrtick timer */ #define TIF_MEMDIE 16 #define TIF_DEBUG 17 /* uses debug registers */ #define TIF_IO_BITMAP 18 /* uses I/O bitmap */ @@ -148,7 +147,6 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SYSCALL_AUDIT (1< -#include +#include -struct inode; -struct mm_struct; -struct task_struct; union ktime; /* Second argument to futex syscall */ diff --git a/trunk/include/linux/hardirq.h b/trunk/include/linux/hardirq.h index 2961ec788046..8d302298a161 100644 --- a/trunk/include/linux/hardirq.h +++ b/trunk/include/linux/hardirq.h @@ -72,7 +72,11 @@ #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) -#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0) +#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL) +# define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked()) +#else +# define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0) +#endif #ifdef CONFIG_PREEMPT # define PREEMPT_CHECK_OFFSET 1 diff --git a/trunk/include/linux/hdreg.h b/trunk/include/linux/hdreg.h index ff43f8d6b5b3..818c6afc1091 100644 --- a/trunk/include/linux/hdreg.h +++ b/trunk/include/linux/hdreg.h @@ -44,9 +44,7 @@ /* Bits for HD_ERROR */ #define MARK_ERR 0x01 /* Bad address mark */ -#define ILI_ERR 0x01 /* Illegal Length Indication (ATAPI) */ #define TRK0_ERR 0x02 /* couldn't find track 0 */ -#define EOM_ERR 0x02 /* End Of Media (ATAPI) */ #define ABRT_ERR 0x04 /* Command aborted */ 
#define MCR_ERR 0x08 /* media change request */ #define ID_ERR 0x10 /* ID field not found */ @@ -54,7 +52,6 @@ #define ECC_ERR 0x40 /* Uncorrectable ECC error */ #define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */ #define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */ -#define LFS_ERR 0xf0 /* Last Failed Sense (ATAPI) */ /* Bits of HD_NSECTOR */ #define CD 0x01 @@ -73,13 +70,13 @@ #define HDIO_DRIVE_HOB_HDR_SIZE (8 * sizeof(__u8)) #define HDIO_DRIVE_TASK_HDR_SIZE (8 * sizeof(__u8)) -#define IDE_DRIVE_TASK_NO_DATA 0 -#ifndef __KERNEL__ #define IDE_DRIVE_TASK_INVALID -1 +#define IDE_DRIVE_TASK_NO_DATA 0 #define IDE_DRIVE_TASK_SET_XFER 1 + #define IDE_DRIVE_TASK_IN 2 + #define IDE_DRIVE_TASK_OUT 3 -#endif #define IDE_DRIVE_TASK_RAW_WRITE 4 /* @@ -90,10 +87,10 @@ #ifndef __KERNEL__ #define IDE_TASKFILE_STD_OUT_FLAGS 0xFE #define IDE_HOB_STD_OUT_FLAGS 0x3C +#endif typedef unsigned char task_ioreg_t; typedef unsigned long sata_ioreg_t; -#endif typedef union ide_reg_valid_s { unsigned all : 16; @@ -119,8 +116,8 @@ typedef union ide_reg_valid_s { } ide_reg_valid_t; typedef struct ide_task_request_s { - __u8 io_ports[8]; - __u8 hob_ports[8]; /* bytes 6 and 7 are unused */ + task_ioreg_t io_ports[8]; + task_ioreg_t hob_ports[8]; ide_reg_valid_t out_flags; ide_reg_valid_t in_flags; int data_phase; @@ -136,35 +133,36 @@ typedef struct ide_ioctl_request_s { } ide_ioctl_request_t; struct hd_drive_cmd_hdr { - __u8 command; - __u8 sector_number; - __u8 feature; - __u8 sector_count; + task_ioreg_t command; + task_ioreg_t sector_number; + task_ioreg_t feature; + task_ioreg_t sector_count; }; -#ifndef __KERNEL__ typedef struct hd_drive_task_hdr { - __u8 data; - __u8 feature; - __u8 sector_count; - __u8 sector_number; - __u8 low_cylinder; - __u8 high_cylinder; - __u8 device_head; - __u8 command; + task_ioreg_t data; + task_ioreg_t feature; + task_ioreg_t sector_count; + task_ioreg_t sector_number; + task_ioreg_t low_cylinder; + task_ioreg_t high_cylinder; + task_ioreg_t device_head; + task_ioreg_t command; } task_struct_t; typedef struct hd_drive_hob_hdr { - __u8 data; - __u8 feature; - __u8 sector_count; - __u8 sector_number; - __u8 low_cylinder; - __u8 high_cylinder; - __u8 device_head; - __u8 control; + task_ioreg_t data; + task_ioreg_t feature; + task_ioreg_t sector_count; + task_ioreg_t sector_number; + task_ioreg_t low_cylinder; + task_ioreg_t high_cylinder; + task_ioreg_t device_head; + task_ioreg_t control; } hob_struct_t; -#endif + +#define TASKFILE_INVALID 0x7fff +#define TASKFILE_48 0x8000 #define TASKFILE_NO_DATA 0x0000 @@ -180,16 +178,12 @@ typedef struct hd_drive_hob_hdr { #define TASKFILE_IN_DMAQ 0x0080 #define TASKFILE_OUT_DMAQ 0x0100 -#ifndef __KERNEL__ #define TASKFILE_P_IN 0x0200 #define TASKFILE_P_OUT 0x0400 #define TASKFILE_P_IN_DMA 0x0800 #define TASKFILE_P_OUT_DMA 0x1000 #define TASKFILE_P_IN_DMAQ 0x2000 #define TASKFILE_P_OUT_DMAQ 0x4000 -#define TASKFILE_48 0x8000 -#define TASKFILE_INVALID 0x7fff -#endif /* ATA/ATAPI Commands pre T13 Spec */ #define WIN_NOP 0x00 diff --git a/trunk/include/linux/hrtimer.h b/trunk/include/linux/hrtimer.h index 49067f14fac1..7a9398e19704 100644 --- a/trunk/include/linux/hrtimer.h +++ b/trunk/include/linux/hrtimer.h @@ -115,8 +115,10 @@ struct hrtimer { enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; unsigned long state; +#ifdef CONFIG_HIGH_RES_TIMERS enum hrtimer_cb_mode cb_mode; struct list_head cb_entry; +#endif #ifdef CONFIG_TIMER_STATS void *start_site; char start_comm[16]; @@ -192,10 +194,10 
@@ struct hrtimer_cpu_base { spinlock_t lock; struct lock_class_key lock_key; struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; - struct list_head cb_pending; #ifdef CONFIG_HIGH_RES_TIMERS ktime_t expires_next; int hres_active; + struct list_head cb_pending; unsigned long nr_events; #endif }; @@ -215,11 +217,6 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) return timer->base->get_time(); } -static inline int hrtimer_is_hres_active(struct hrtimer *timer) -{ - return timer->base->cpu_base->hres_active; -} - /* * The resolution of the clocks. The resolution value is returned in * the clock_getres() system call to give application programmers an @@ -251,10 +248,6 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) return timer->base->softirq_time; } -static inline int hrtimer_is_hres_active(struct hrtimer *timer) -{ - return 0; -} #endif extern ktime_t ktime_get(void); @@ -317,7 +310,6 @@ extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, /* Soft interrupt function to run the hrtimer queues: */ extern void hrtimer_run_queues(void); -extern void hrtimer_run_pending(void); /* Bootup initialization: */ extern void __init hrtimers_init(void); diff --git a/trunk/include/linux/ide.h b/trunk/include/linux/ide.h index 1e4409937ec3..9a6a41e7079f 100644 --- a/trunk/include/linux/ide.h +++ b/trunk/include/linux/ide.h @@ -27,10 +27,25 @@ #include #include -#if defined(CRIS) || defined(FRV) -# define SUPPORT_VLB_SYNC 0 -#else -# define SUPPORT_VLB_SYNC 1 +/****************************************************************************** + * IDE driver configuration options (play with these as desired): + * + * REALLY_SLOW_IO can be defined in ide.c and ide-cd.c, if necessary + */ +#define INITIAL_MULT_COUNT 0 /* off=0; on=2,4,8,16,32, etc.. */ + +#ifndef SUPPORT_SLOW_DATA_PORTS /* 1 to support slow data ports */ +#define SUPPORT_SLOW_DATA_PORTS 1 /* 0 to reduce kernel size */ +#endif +#ifndef SUPPORT_VLB_SYNC /* 1 to support weird 32-bit chips */ +#define SUPPORT_VLB_SYNC 1 /* 0 to reduce kernel size */ +#endif +#ifndef OK_TO_RESET_CONTROLLER /* 1 needed for good error recovery */ +#define OK_TO_RESET_CONTROLLER 1 /* 0 for use with AH2372A/B interface */ +#endif + +#ifndef DISABLE_IRQ_NOSYNC +#define DISABLE_IRQ_NOSYNC 0 #endif /* @@ -40,6 +55,10 @@ #define IDE_NO_IRQ (-1) +/* + * "No user-serviceable parts" beyond this point :) + *****************************************************************************/ + typedef unsigned char byte; /* used everywhere */ /* @@ -84,6 +103,8 @@ typedef unsigned char byte; /* used everywhere */ #define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET #define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET +#define IDE_CONTROL_OFFSET_HOB (7) + #define IDE_DATA_REG (HWIF(drive)->io_ports[IDE_DATA_OFFSET]) #define IDE_ERROR_REG (HWIF(drive)->io_ports[IDE_ERROR_OFFSET]) #define IDE_NSECTOR_REG (HWIF(drive)->io_ports[IDE_NSECTOR_OFFSET]) @@ -306,15 +327,46 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, typedef union { unsigned all : 8; struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) unsigned set_geometry : 1; unsigned recalibrate : 1; unsigned set_multmode : 1; unsigned set_tune : 1; unsigned serviced : 1; unsigned reserved : 3; +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned reserved : 3; + unsigned serviced : 1; + unsigned set_tune : 1; + unsigned set_multmode : 1; + unsigned recalibrate : 1; + unsigned set_geometry : 1; +#else +#error "Please fix " +#endif } b; } special_t; +/* + * ATA DATA Register Special. 
+ * ATA NSECTOR Count Register(). + * ATAPI Byte Count Register. + */ +typedef union { + unsigned all :16; + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned low :8; /* LSB */ + unsigned high :8; /* MSB */ +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned high :8; /* MSB */ + unsigned low :8; /* LSB */ +#else +#error "Please fix " +#endif + } b; +} ata_nsector_t, ata_data_t, atapi_bcount_t; + /* * ATA-IDE Select Register, aka Device-Head * @@ -345,6 +397,131 @@ typedef union { } b; } select_t, ata_select_t; +/* + * The ATA-IDE Status Register. + * The ATAPI Status Register. + * + * check : Error occurred + * idx : Index Error + * corr : Correctable error occurred + * drq : Data is request by the device + * dsc : Disk Seek Complete : ata + * : Media access command finished : atapi + * df : Device Fault : ata + * : Reserved : atapi + * drdy : Ready, Command Mode Capable : ata + * : Ignored for ATAPI commands : atapi + * bsy : Disk is Busy + * : The device has access to the command block + */ +typedef union { + unsigned all :8; + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned check :1; + unsigned idx :1; + unsigned corr :1; + unsigned drq :1; + unsigned dsc :1; + unsigned df :1; + unsigned drdy :1; + unsigned bsy :1; +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned bsy :1; + unsigned drdy :1; + unsigned df :1; + unsigned dsc :1; + unsigned drq :1; + unsigned corr :1; + unsigned idx :1; + unsigned check :1; +#else +#error "Please fix " +#endif + } b; +} ata_status_t, atapi_status_t; + +/* + * ATAPI Feature Register + * + * dma : Using DMA or PIO + * reserved321 : Reserved + * reserved654 : Reserved (Tag Type) + * reserved7 : Reserved + */ +typedef union { + unsigned all :8; + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned dma :1; + unsigned reserved321 :3; + unsigned reserved654 :3; + unsigned reserved7 :1; +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned reserved7 :1; + unsigned reserved654 :3; + unsigned reserved321 :3; + unsigned dma :1; +#else +#error "Please fix " +#endif + } b; +} atapi_feature_t; + +/* + * ATAPI Interrupt Reason Register. + * + * cod : Information transferred is command (1) or data (0) + * io : The device requests us to read (1) or write (0) + * reserved : Reserved + */ +typedef union { + unsigned all :8; + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned cod :1; + unsigned io :1; + unsigned reserved :6; +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned reserved :6; + unsigned io :1; + unsigned cod :1; +#else +#error "Please fix " +#endif + } b; +} atapi_ireason_t; + +/* + * The ATAPI error register. 
+ * + * ili : Illegal Length Indication + * eom : End Of Media Detected + * abrt : Aborted command - As defined by ATA + * mcr : Media Change Requested - As defined by ATA + * sense_key : Sense key of the last failed packet command + */ +typedef union { + unsigned all :8; + struct { +#if defined(__LITTLE_ENDIAN_BITFIELD) + unsigned ili :1; + unsigned eom :1; + unsigned abrt :1; + unsigned mcr :1; + unsigned sense_key :4; +#elif defined(__BIG_ENDIAN_BITFIELD) + unsigned sense_key :4; + unsigned mcr :1; + unsigned abrt :1; + unsigned eom :1; + unsigned ili :1; +#else +#error "Please fix " +#endif + } b; +} atapi_error_t; + /* * Status returned from various ide_ functions */ @@ -524,6 +701,8 @@ typedef struct hwif_s { void (*pre_reset)(ide_drive_t *); /* routine to reset controller after a disk reset */ void (*resetproc)(ide_drive_t *); + /* special interrupt handling for shared pci interrupts */ + void (*intrproc)(ide_drive_t *); /* special host masking for drive selection */ void (*maskproc)(ide_drive_t *, int); /* check host's drive quirk list */ @@ -587,6 +766,7 @@ typedef struct hwif_s { int rqsize; /* max sectors per request */ int irq; /* our irq number */ + unsigned long dma_master; /* reference base addr dmabase */ unsigned long dma_base; /* base addr for dma ports */ unsigned long dma_command; /* dma command register */ unsigned long dma_vendor1; /* dma vendor 1 register */ @@ -626,6 +806,7 @@ typedef struct hwif_s { /* * internal ide interrupt handler type */ +typedef ide_startstop_t (ide_pre_handler_t)(ide_drive_t *, struct request *); typedef ide_startstop_t (ide_handler_t)(ide_drive_t *); typedef int (ide_expiry_t)(ide_drive_t *); @@ -839,8 +1020,7 @@ int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, extern void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry); -void ide_execute_command(ide_drive_t *, u8, ide_handler_t *, unsigned int, - ide_expiry_t *); +extern void ide_execute_command(ide_drive_t *, task_ioreg_t cmd, ide_handler_t *, unsigned int, ide_expiry_t *); ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8); @@ -882,114 +1062,52 @@ extern void ide_end_drive_cmd(ide_drive_t *, u8, u8); */ extern int ide_wait_cmd(ide_drive_t *, u8, u8, u8, u8, u8 *); -enum { - IDE_TFLAG_LBA48 = (1 << 0), - IDE_TFLAG_NO_SELECT_MASK = (1 << 1), - IDE_TFLAG_FLAGGED = (1 << 2), - IDE_TFLAG_OUT_DATA = (1 << 3), - IDE_TFLAG_OUT_HOB_FEATURE = (1 << 4), - IDE_TFLAG_OUT_HOB_NSECT = (1 << 5), - IDE_TFLAG_OUT_HOB_LBAL = (1 << 6), - IDE_TFLAG_OUT_HOB_LBAM = (1 << 7), - IDE_TFLAG_OUT_HOB_LBAH = (1 << 8), - IDE_TFLAG_OUT_HOB = IDE_TFLAG_OUT_HOB_FEATURE | - IDE_TFLAG_OUT_HOB_NSECT | - IDE_TFLAG_OUT_HOB_LBAL | - IDE_TFLAG_OUT_HOB_LBAM | - IDE_TFLAG_OUT_HOB_LBAH, - IDE_TFLAG_OUT_FEATURE = (1 << 9), - IDE_TFLAG_OUT_NSECT = (1 << 10), - IDE_TFLAG_OUT_LBAL = (1 << 11), - IDE_TFLAG_OUT_LBAM = (1 << 12), - IDE_TFLAG_OUT_LBAH = (1 << 13), - IDE_TFLAG_OUT_TF = IDE_TFLAG_OUT_FEATURE | - IDE_TFLAG_OUT_NSECT | - IDE_TFLAG_OUT_LBAL | - IDE_TFLAG_OUT_LBAM | - IDE_TFLAG_OUT_LBAH, - IDE_TFLAG_OUT_DEVICE = (1 << 14), - IDE_TFLAG_WRITE = (1 << 15), - IDE_TFLAG_FLAGGED_SET_IN_FLAGS = (1 << 16), - IDE_TFLAG_IN_DATA = (1 << 17), - IDE_TFLAG_CUSTOM_HANDLER = (1 << 18), - IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 19), - IDE_TFLAG_IN_HOB_FEATURE = (1 << 20), - IDE_TFLAG_IN_HOB_NSECT = (1 << 21), - IDE_TFLAG_IN_HOB_LBAL = (1 << 22), - IDE_TFLAG_IN_HOB_LBAM = (1 << 23), - IDE_TFLAG_IN_HOB_LBAH = (1 << 24), - IDE_TFLAG_IN_HOB_LBA = 
IDE_TFLAG_IN_HOB_LBAL | - IDE_TFLAG_IN_HOB_LBAM | - IDE_TFLAG_IN_HOB_LBAH, - IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE | - IDE_TFLAG_IN_HOB_NSECT | - IDE_TFLAG_IN_HOB_LBA, - IDE_TFLAG_IN_NSECT = (1 << 25), - IDE_TFLAG_IN_LBAL = (1 << 26), - IDE_TFLAG_IN_LBAM = (1 << 27), - IDE_TFLAG_IN_LBAH = (1 << 28), - IDE_TFLAG_IN_LBA = IDE_TFLAG_IN_LBAL | - IDE_TFLAG_IN_LBAM | - IDE_TFLAG_IN_LBAH, - IDE_TFLAG_IN_TF = IDE_TFLAG_IN_NSECT | - IDE_TFLAG_IN_LBA, - IDE_TFLAG_IN_DEVICE = (1 << 29), -}; - -struct ide_taskfile { - u8 hob_data; /* 0: high data byte (for TASKFILE IOCTL) */ - - u8 hob_feature; /* 1-5: additional data to support LBA48 */ - u8 hob_nsect; - u8 hob_lbal; - u8 hob_lbam; - u8 hob_lbah; - - u8 data; /* 6: low data byte (for TASKFILE IOCTL) */ - - union { /*  7: */ - u8 error; /* read: error */ - u8 feature; /* write: feature */ - }; - - u8 nsect; /* 8: number of sectors */ - u8 lbal; /* 9: LBA low */ - u8 lbam; /* 10: LBA mid */ - u8 lbah; /* 11: LBA high */ - - u8 device; /* 12: device select */ - - union { /* 13: */ - u8 status; /*  read: status  */ - u8 command; /* write: command */ - }; -}; - typedef struct ide_task_s { - union { - struct ide_taskfile tf; - u8 tf_array[14]; - }; - u32 tf_flags; +/* + * struct hd_drive_task_hdr tf; + * task_struct_t tf; + * struct hd_drive_hob_hdr hobf; + * hob_struct_t hobf; + */ + task_ioreg_t tfRegister[8]; + task_ioreg_t hobRegister[8]; + ide_reg_valid_t tf_out_flags; + ide_reg_valid_t tf_in_flags; int data_phase; + int command_type; + ide_pre_handler_t *prehandler; + ide_handler_t *handler; struct request *rq; /* copy of request */ void *special; /* valid_t generally */ } ide_task_t; -void ide_tf_load(ide_drive_t *, ide_task_t *); -void ide_tf_read(ide_drive_t *, ide_task_t *); +extern u32 ide_read_24(ide_drive_t *); extern void SELECT_DRIVE(ide_drive_t *); +extern void SELECT_INTERRUPT(ide_drive_t *); extern void SELECT_MASK(ide_drive_t *, int); +extern void QUIRK_LIST(ide_drive_t *); extern int drive_is_ready(ide_drive_t *); -void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8); +/* + * taskfile io for disks for now...and builds request from ide_ioctl + */ +extern ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *); + +/* + * Special Flagged Register Validation Caller + */ +extern ide_startstop_t flagged_taskfile(ide_drive_t *, ide_task_t *); -ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *); +extern ide_startstop_t set_multmode_intr(ide_drive_t *); +extern ide_startstop_t set_geometry_intr(ide_drive_t *); +extern ide_startstop_t recal_intr(ide_drive_t *); +extern ide_startstop_t task_no_data_intr(ide_drive_t *); +extern ide_startstop_t task_in_intr(ide_drive_t *); +extern ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *); -int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *, u16); -int ide_no_data_taskfile(ide_drive_t *, ide_task_t *); +extern int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *); int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long); int ide_cmd_ioctl(ide_drive_t *, unsigned int, unsigned long); @@ -1094,7 +1212,6 @@ enum { IDE_HFLAG_IO_32BIT = (1 << 24), /* unmask IRQs */ IDE_HFLAG_UNMASK_IRQS = (1 << 25), - IDE_HFLAG_ABUSE_SET_DMA_MODE = (1 << 26), }; #ifdef CONFIG_BLK_DEV_OFFBOARD @@ -1112,7 +1229,7 @@ struct ide_port_info { void (*fixup)(ide_hwif_t *); ide_pci_enablebit_t enablebits[2]; hwif_chipset_t chipset; - u8 extra; + unsigned int extra; u32 host_flags; u8 pio_mask; u8 swdma_mask; @@ -1239,7 +1356,6 @@ static inline int ide_dev_is_sata(struct hd_driveid *id) 
return 0; } -u64 ide_get_lba_addr(struct ide_taskfile *, int); u8 ide_dump_status(ide_drive_t *, const char *, u8); typedef struct ide_pio_timings_s { diff --git a/trunk/include/linux/init_task.h b/trunk/include/linux/init_task.h index 796019b22b6f..cae35b6b9aec 100644 --- a/trunk/include/linux/init_task.h +++ b/trunk/include/linux/init_task.h @@ -132,12 +132,9 @@ extern struct group_info init_groups; .cpus_allowed = CPU_MASK_ALL, \ .mm = NULL, \ .active_mm = &init_mm, \ - .rt = { \ - .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \ - .time_slice = HZ, \ - .nr_cpus_allowed = NR_CPUS, \ - }, \ + .run_list = LIST_HEAD_INIT(tsk.run_list), \ .ioprio = 0, \ + .time_slice = HZ, \ .tasks = LIST_HEAD_INIT(tsk.tasks), \ .ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \ .ptrace_list = LIST_HEAD_INIT(tsk.ptrace_list), \ diff --git a/trunk/include/linux/interrupt.h b/trunk/include/linux/interrupt.h index c3db4a00f1fa..2306920fa388 100644 --- a/trunk/include/linux/interrupt.h +++ b/trunk/include/linux/interrupt.h @@ -256,7 +256,6 @@ enum #ifdef CONFIG_HIGH_RES_TIMERS HRTIMER_SOFTIRQ, #endif - RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ }; /* softirq mask and active fields moved to irq_cpustat_t in diff --git a/trunk/include/linux/jiffies.h b/trunk/include/linux/jiffies.h index 7ba9e47bf061..8b080024bbc1 100644 --- a/trunk/include/linux/jiffies.h +++ b/trunk/include/linux/jiffies.h @@ -29,12 +29,6 @@ # define SHIFT_HZ 9 #elif HZ >= 768 && HZ < 1536 # define SHIFT_HZ 10 -#elif HZ >= 1536 && HZ < 3072 -# define SHIFT_HZ 11 -#elif HZ >= 3072 && HZ < 6144 -# define SHIFT_HZ 12 -#elif HZ >= 6144 && HZ < 12288 -# define SHIFT_HZ 13 #else # error You lose. #endif diff --git a/trunk/include/linux/kernel.h b/trunk/include/linux/kernel.h index a7283c9beadf..94bc99656963 100644 --- a/trunk/include/linux/kernel.h +++ b/trunk/include/linux/kernel.h @@ -105,8 +105,8 @@ struct user; * supposed to. 
*/ #ifdef CONFIG_PREEMPT_VOLUNTARY -extern int _cond_resched(void); -# define might_resched() _cond_resched() +extern int cond_resched(void); +# define might_resched() cond_resched() #else # define might_resched() do { } while (0) #endif diff --git a/trunk/include/linux/latencytop.h b/trunk/include/linux/latencytop.h deleted file mode 100644 index 901c2d6377a8..000000000000 --- a/trunk/include/linux/latencytop.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * latencytop.h: Infrastructure for displaying latency - * - * (C) Copyright 2008 Intel Corporation - * Author: Arjan van de Ven - * - */ - -#ifndef _INCLUDE_GUARD_LATENCYTOP_H_ -#define _INCLUDE_GUARD_LATENCYTOP_H_ - -#ifdef CONFIG_LATENCYTOP - -#define LT_SAVECOUNT 32 -#define LT_BACKTRACEDEPTH 12 - -struct latency_record { - unsigned long backtrace[LT_BACKTRACEDEPTH]; - unsigned int count; - unsigned long time; - unsigned long max; -}; - - -struct task_struct; - -void account_scheduler_latency(struct task_struct *task, int usecs, int inter); - -void clear_all_latency_tracing(struct task_struct *p); - -#else - -static inline void -account_scheduler_latency(struct task_struct *task, int usecs, int inter) -{ -} - -static inline void clear_all_latency_tracing(struct task_struct *p) -{ -} - -#endif - -#endif diff --git a/trunk/include/linux/notifier.h b/trunk/include/linux/notifier.h index 5dfbc684ce7d..0c40cc0b4a36 100644 --- a/trunk/include/linux/notifier.h +++ b/trunk/include/linux/notifier.h @@ -207,7 +207,9 @@ static inline int notifier_to_errno(int ret) #define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ #define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ -#define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, +#define CPU_LOCK_ACQUIRE 0x0008 /* Acquire all hotcpu locks */ +#define CPU_LOCK_RELEASE 0x0009 /* Release all hotcpu locks */ +#define CPU_DYING 0x000A /* CPU (unsigned)v not running any task, * not handling interrupts, soon dead */ /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend diff --git a/trunk/include/linux/rcuclassic.h b/trunk/include/linux/rcuclassic.h deleted file mode 100644 index 4d6624260b4c..000000000000 --- a/trunk/include/linux/rcuclassic.h +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Read-Copy Update mechanism for mutual exclusion (classic version) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright IBM Corporation, 2001 - * - * Author: Dipankar Sarma - * - * Based on the original work by Paul McKenney - * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 
- * Papers: - * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf - * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) - * - * For detailed explanation of Read-Copy Update mechanism see - - * Documentation/RCU - * - */ - -#ifndef __LINUX_RCUCLASSIC_H -#define __LINUX_RCUCLASSIC_H - -#ifdef __KERNEL__ - -#include -#include -#include -#include -#include -#include - - -/* Global control variables for rcupdate callback mechanism. */ -struct rcu_ctrlblk { - long cur; /* Current batch number. */ - long completed; /* Number of the last completed batch */ - int next_pending; /* Is the next batch already waiting? */ - - int signaled; - - spinlock_t lock ____cacheline_internodealigned_in_smp; - cpumask_t cpumask; /* CPUs that need to switch in order */ - /* for current batch to proceed. */ -} ____cacheline_internodealigned_in_smp; - -/* Is batch a before batch b ? */ -static inline int rcu_batch_before(long a, long b) -{ - return (a - b) < 0; -} - -/* Is batch a after batch b ? */ -static inline int rcu_batch_after(long a, long b) -{ - return (a - b) > 0; -} - -/* - * Per-CPU data for Read-Copy UPdate. - * nxtlist - new callbacks are added here - * curlist - current batch for which quiescent cycle started if any - */ -struct rcu_data { - /* 1) quiescent state handling : */ - long quiescbatch; /* Batch # for grace period */ - int passed_quiesc; /* User-mode/idle loop etc. */ - int qs_pending; /* core waits for quiesc state */ - - /* 2) batch handling */ - long batch; /* Batch # for current RCU batch */ - struct rcu_head *nxtlist; - struct rcu_head **nxttail; - long qlen; /* # of queued callbacks */ - struct rcu_head *curlist; - struct rcu_head **curtail; - struct rcu_head *donelist; - struct rcu_head **donetail; - long blimit; /* Upper limit on a processed batch */ - int cpu; - struct rcu_head barrier; -}; - -DECLARE_PER_CPU(struct rcu_data, rcu_data); -DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); - -/* - * Increment the quiescent state counter. - * The counter is a bit degenerated: We do not need to know - * how many quiescent states passed, just if there was at least - * one since the start of the grace period. Thus just a flag. 
- */ -static inline void rcu_qsctr_inc(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_data, cpu); - rdp->passed_quiesc = 1; -} -static inline void rcu_bh_qsctr_inc(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); - rdp->passed_quiesc = 1; -} - -extern int rcu_pending(int cpu); -extern int rcu_needs_cpu(int cpu); - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -extern struct lockdep_map rcu_lock_map; -# define rcu_read_acquire() \ - lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_) -# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) -#else -# define rcu_read_acquire() do { } while (0) -# define rcu_read_release() do { } while (0) -#endif - -#define __rcu_read_lock() \ - do { \ - preempt_disable(); \ - __acquire(RCU); \ - rcu_read_acquire(); \ - } while (0) -#define __rcu_read_unlock() \ - do { \ - rcu_read_release(); \ - __release(RCU); \ - preempt_enable(); \ - } while (0) -#define __rcu_read_lock_bh() \ - do { \ - local_bh_disable(); \ - __acquire(RCU_BH); \ - rcu_read_acquire(); \ - } while (0) -#define __rcu_read_unlock_bh() \ - do { \ - rcu_read_release(); \ - __release(RCU_BH); \ - local_bh_enable(); \ - } while (0) - -#define __synchronize_sched() synchronize_rcu() - -extern void __rcu_init(void); -extern void rcu_check_callbacks(int cpu, int user); -extern void rcu_restart_cpu(int cpu); - -extern long rcu_batches_completed(void); -extern long rcu_batches_completed_bh(void); - -#endif /* __KERNEL__ */ -#endif /* __LINUX_RCUCLASSIC_H */ diff --git a/trunk/include/linux/rcupdate.h b/trunk/include/linux/rcupdate.h index d32c14de270e..cc24a01df940 100644 --- a/trunk/include/linux/rcupdate.h +++ b/trunk/include/linux/rcupdate.h @@ -15,7 +15,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * Copyright IBM Corporation, 2001 + * Copyright (C) IBM Corporation, 2001 * * Author: Dipankar Sarma * @@ -53,18 +53,96 @@ struct rcu_head { void (*func)(struct rcu_head *head); }; -#ifdef CONFIG_CLASSIC_RCU -#include -#else /* #ifdef CONFIG_CLASSIC_RCU */ -#include -#endif /* #else #ifdef CONFIG_CLASSIC_RCU */ - #define RCU_HEAD_INIT { .next = NULL, .func = NULL } #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT #define INIT_RCU_HEAD(ptr) do { \ (ptr)->next = NULL; (ptr)->func = NULL; \ } while (0) + + +/* Global control variables for rcupdate callback mechanism. */ +struct rcu_ctrlblk { + long cur; /* Current batch number. */ + long completed; /* Number of the last completed batch */ + int next_pending; /* Is the next batch already waiting? */ + + int signaled; + + spinlock_t lock ____cacheline_internodealigned_in_smp; + cpumask_t cpumask; /* CPUs that need to switch in order */ + /* for current batch to proceed. */ +} ____cacheline_internodealigned_in_smp; + +/* Is batch a before batch b ? */ +static inline int rcu_batch_before(long a, long b) +{ + return (a - b) < 0; +} + +/* Is batch a after batch b ? */ +static inline int rcu_batch_after(long a, long b) +{ + return (a - b) > 0; +} + +/* + * Per-CPU data for Read-Copy UPdate. + * nxtlist - new callbacks are added here + * curlist - current batch for which quiescent cycle started if any + */ +struct rcu_data { + /* 1) quiescent state handling : */ + long quiescbatch; /* Batch # for grace period */ + int passed_quiesc; /* User-mode/idle loop etc. 
*/ + int qs_pending; /* core waits for quiesc state */ + + /* 2) batch handling */ + long batch; /* Batch # for current RCU batch */ + struct rcu_head *nxtlist; + struct rcu_head **nxttail; + long qlen; /* # of queued callbacks */ + struct rcu_head *curlist; + struct rcu_head **curtail; + struct rcu_head *donelist; + struct rcu_head **donetail; + long blimit; /* Upper limit on a processed batch */ + int cpu; + struct rcu_head barrier; +}; + +DECLARE_PER_CPU(struct rcu_data, rcu_data); +DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); + +/* + * Increment the quiescent state counter. + * The counter is a bit degenerated: We do not need to know + * how many quiescent states passed, just if there was at least + * one since the start of the grace period. Thus just a flag. + */ +static inline void rcu_qsctr_inc(int cpu) +{ + struct rcu_data *rdp = &per_cpu(rcu_data, cpu); + rdp->passed_quiesc = 1; +} +static inline void rcu_bh_qsctr_inc(int cpu) +{ + struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); + rdp->passed_quiesc = 1; +} + +extern int rcu_pending(int cpu); +extern int rcu_needs_cpu(int cpu); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern struct lockdep_map rcu_lock_map; +# define rcu_read_acquire() lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_) +# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) +#else +# define rcu_read_acquire() do { } while (0) +# define rcu_read_release() do { } while (0) +#endif + /** * rcu_read_lock - mark the beginning of an RCU read-side critical section. * @@ -94,13 +172,24 @@ struct rcu_head { * * It is illegal to block while in an RCU read-side critical section. */ -#define rcu_read_lock() __rcu_read_lock() +#define rcu_read_lock() \ + do { \ + preempt_disable(); \ + __acquire(RCU); \ + rcu_read_acquire(); \ + } while(0) /** * rcu_read_unlock - marks the end of an RCU read-side critical section. * * See rcu_read_lock() for more information. */ +#define rcu_read_unlock() \ + do { \ + rcu_read_release(); \ + __release(RCU); \ + preempt_enable(); \ + } while(0) /* * So where is rcu_write_lock()? It does not exist, as there is no @@ -111,7 +200,6 @@ struct rcu_head { * used as well. RCU does not care how the writers keep out of each * others' way, as long as they do so. */ -#define rcu_read_unlock() __rcu_read_unlock() /** * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section @@ -124,14 +212,24 @@ struct rcu_head { * can use just rcu_read_lock(). * */ -#define rcu_read_lock_bh() __rcu_read_lock_bh() +#define rcu_read_lock_bh() \ + do { \ + local_bh_disable(); \ + __acquire(RCU_BH); \ + rcu_read_acquire(); \ + } while(0) /* * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section * * See rcu_read_lock_bh() for more information. */ -#define rcu_read_unlock_bh() __rcu_read_unlock_bh() +#define rcu_read_unlock_bh() \ + do { \ + rcu_read_release(); \ + __release(RCU_BH); \ + local_bh_enable(); \ + } while(0) /* * Prevent the compiler from merging or refetching accesses. The compiler @@ -195,52 +293,21 @@ struct rcu_head { * In "classic RCU", these two guarantees happen to be one and * the same, but can differ in realtime RCU implementations. */ -#define synchronize_sched() __synchronize_sched() - -/** - * call_rcu - Queue an RCU callback for invocation after a grace period. - * @head: structure to be used for queueing the RCU updates. 
- * @func: actual update function to be invoked after the grace period - * - * The update function will be invoked some time after a full grace - * period elapses, in other words after all currently executing RCU - * read-side critical sections have completed. RCU read-side critical - * sections are delimited by rcu_read_lock() and rcu_read_unlock(), - * and may be nested. - */ -extern void call_rcu(struct rcu_head *head, - void (*func)(struct rcu_head *head)); - -/** - * call_rcu_bh - Queue an RCU for invocation after a quicker grace period. - * @head: structure to be used for queueing the RCU updates. - * @func: actual update function to be invoked after the grace period - * - * The update function will be invoked some time after a full grace - * period elapses, in other words after all currently executing RCU - * read-side critical sections have completed. call_rcu_bh() assumes - * that the read-side critical sections end on completion of a softirq - * handler. This means that read-side critical sections in process - * context must not be interrupted by softirqs. This interface is to be - * used when most of the read-side critical sections are in softirq context. - * RCU read-side critical sections are delimited by : - * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context. - * OR - * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context. - * These may be nested. - */ -extern void call_rcu_bh(struct rcu_head *head, - void (*func)(struct rcu_head *head)); +#define synchronize_sched() synchronize_rcu() -/* Exported common interfaces */ -extern void synchronize_rcu(void); -extern void rcu_barrier(void); +extern void rcu_init(void); +extern void rcu_check_callbacks(int cpu, int user); +extern void rcu_restart_cpu(int cpu); extern long rcu_batches_completed(void); extern long rcu_batches_completed_bh(void); -/* Internal to kernel */ -extern void rcu_init(void); -extern int rcu_needs_cpu(int cpu); +/* Exported interfaces */ +extern void FASTCALL(call_rcu(struct rcu_head *head, + void (*func)(struct rcu_head *head))); +extern void FASTCALL(call_rcu_bh(struct rcu_head *head, + void (*func)(struct rcu_head *head))); +extern void synchronize_rcu(void); +extern void rcu_barrier(void); #endif /* __KERNEL__ */ #endif /* __LINUX_RCUPDATE_H */ diff --git a/trunk/include/linux/rcupreempt.h b/trunk/include/linux/rcupreempt.h deleted file mode 100644 index ece8eb3e4151..000000000000 --- a/trunk/include/linux/rcupreempt.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Read-Copy Update mechanism for mutual exclusion (RT implementation) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) IBM Corporation, 2006 - * - * Author: Paul McKenney - * - * Based on the original work by Paul McKenney - * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. 
- * Papers: - * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf - * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) - * - * For detailed explanation of Read-Copy Update mechanism see - - * Documentation/RCU - * - */ - -#ifndef __LINUX_RCUPREEMPT_H -#define __LINUX_RCUPREEMPT_H - -#ifdef __KERNEL__ - -#include -#include -#include -#include -#include -#include - -#define rcu_qsctr_inc(cpu) -#define rcu_bh_qsctr_inc(cpu) -#define call_rcu_bh(head, rcu) call_rcu(head, rcu) - -extern void __rcu_read_lock(void); -extern void __rcu_read_unlock(void); -extern int rcu_pending(int cpu); -extern int rcu_needs_cpu(int cpu); - -#define __rcu_read_lock_bh() { rcu_read_lock(); local_bh_disable(); } -#define __rcu_read_unlock_bh() { local_bh_enable(); rcu_read_unlock(); } - -extern void __synchronize_sched(void); - -extern void __rcu_init(void); -extern void rcu_check_callbacks(int cpu, int user); -extern void rcu_restart_cpu(int cpu); -extern long rcu_batches_completed(void); - -/* - * Return the number of RCU batches processed thus far. Useful for debug - * and statistic. The _bh variant is identifcal to straight RCU - */ -static inline long rcu_batches_completed_bh(void) -{ - return rcu_batches_completed(); -} - -#ifdef CONFIG_RCU_TRACE -struct rcupreempt_trace; -extern long *rcupreempt_flipctr(int cpu); -extern long rcupreempt_data_completed(void); -extern int rcupreempt_flip_flag(int cpu); -extern int rcupreempt_mb_flag(int cpu); -extern char *rcupreempt_try_flip_state_name(void); -extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu); -#endif - -struct softirq_action; - -#endif /* __KERNEL__ */ -#endif /* __LINUX_RCUPREEMPT_H */ diff --git a/trunk/include/linux/rcupreempt_trace.h b/trunk/include/linux/rcupreempt_trace.h deleted file mode 100644 index 21cd6b2a5c42..000000000000 --- a/trunk/include/linux/rcupreempt_trace.h +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Read-Copy Update mechanism for mutual exclusion (RT implementation) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright (C) IBM Corporation, 2006 - * - * Author: Paul McKenney - * - * Based on the original work by Paul McKenney - * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. - * Papers: - * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf - * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) - * - * For detailed explanation of the Preemptible Read-Copy Update mechanism see - - * http://lwn.net/Articles/253651/ - */ - -#ifndef __LINUX_RCUPREEMPT_TRACE_H -#define __LINUX_RCUPREEMPT_TRACE_H - -#ifdef __KERNEL__ -#include -#include - -#include - -/* - * PREEMPT_RCU data structures. 
- */ - -struct rcupreempt_trace { - long next_length; - long next_add; - long wait_length; - long wait_add; - long done_length; - long done_add; - long done_remove; - atomic_t done_invoked; - long rcu_check_callbacks; - atomic_t rcu_try_flip_1; - atomic_t rcu_try_flip_e1; - long rcu_try_flip_i1; - long rcu_try_flip_ie1; - long rcu_try_flip_g1; - long rcu_try_flip_a1; - long rcu_try_flip_ae1; - long rcu_try_flip_a2; - long rcu_try_flip_z1; - long rcu_try_flip_ze1; - long rcu_try_flip_z2; - long rcu_try_flip_m1; - long rcu_try_flip_me1; - long rcu_try_flip_m2; -}; - -#ifdef CONFIG_RCU_TRACE -#define RCU_TRACE(fn, arg) fn(arg); -#else -#define RCU_TRACE(fn, arg) -#endif - -extern void rcupreempt_trace_move2done(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace); -extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace); - -#endif /* __KERNEL__ */ -#endif /* __LINUX_RCUPREEMPT_TRACE_H */ diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h index df5b24ee80b3..d6eacda765ca 100644 --- a/trunk/include/linux/sched.h +++ b/trunk/include/linux/sched.h @@ -78,6 +78,7 @@ struct sched_param { #include #include #include +#include #include #include @@ -87,13 +88,11 @@ struct sched_param { #include #include #include -#include #include struct exec_domain; struct futex_pi_state; -struct robust_list_head; struct bio; /* @@ -231,8 +230,6 @@ static inline int select_nohz_load_balancer(int cpu) } #endif -extern unsigned long rt_needs_cpu(int cpu); - /* * Only dump TASK_* tasks. 
(0 for all tasks) */ @@ -260,19 +257,13 @@ extern void trap_init(void); extern void account_process_tick(struct task_struct *task, int user); extern void update_process_times(int user); extern void scheduler_tick(void); -extern void hrtick_resched(void); - -extern void sched_show_task(struct task_struct *p); #ifdef CONFIG_DETECT_SOFTLOCKUP extern void softlockup_tick(void); extern void spawn_softlockup_task(void); extern void touch_softlockup_watchdog(void); extern void touch_all_softlockup_watchdogs(void); -extern unsigned long softlockup_thresh; -extern unsigned long sysctl_hung_task_check_count; -extern unsigned long sysctl_hung_task_timeout_secs; -extern unsigned long sysctl_hung_task_warnings; +extern int softlockup_thresh; #else static inline void softlockup_tick(void) { @@ -831,7 +822,6 @@ struct sched_class { void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup); void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep); void (*yield_task) (struct rq *rq); - int (*select_task_rq)(struct task_struct *p, int sync); void (*check_preempt_curr) (struct rq *rq, struct task_struct *p); @@ -847,25 +837,11 @@ struct sched_class { int (*move_one_task) (struct rq *this_rq, int this_cpu, struct rq *busiest, struct sched_domain *sd, enum cpu_idle_type idle); - void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); - void (*post_schedule) (struct rq *this_rq); - void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); #endif void (*set_curr_task) (struct rq *rq); - void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); + void (*task_tick) (struct rq *rq, struct task_struct *p); void (*task_new) (struct rq *rq, struct task_struct *p); - void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask); - - void (*join_domain)(struct rq *rq); - void (*leave_domain)(struct rq *rq); - - void (*switched_from) (struct rq *this_rq, struct task_struct *task, - int running); - void (*switched_to) (struct rq *this_rq, struct task_struct *task, - int running); - void (*prio_changed) (struct rq *this_rq, struct task_struct *task, - int oldprio, int running); }; struct load_weight { @@ -895,8 +871,6 @@ struct sched_entity { #ifdef CONFIG_SCHEDSTATS u64 wait_start; u64 wait_max; - u64 wait_count; - u64 wait_sum; u64 sleep_start; u64 sleep_max; @@ -935,21 +909,6 @@ struct sched_entity { #endif }; -struct sched_rt_entity { - struct list_head run_list; - unsigned int time_slice; - unsigned long timeout; - int nr_cpus_allowed; - -#ifdef CONFIG_FAIR_GROUP_SCHED - struct sched_rt_entity *parent; - /* rq on which this entity is (to be) queued: */ - struct rt_rq *rt_rq; - /* rq "owned" by this entity/group: */ - struct rt_rq *my_q; -#endif -}; - struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ void *stack; @@ -966,9 +925,9 @@ struct task_struct { #endif int prio, static_prio, normal_prio; + struct list_head run_list; const struct sched_class *sched_class; struct sched_entity se; - struct sched_rt_entity rt; #ifdef CONFIG_PREEMPT_NOTIFIERS /* list of struct preempt_notifier: */ @@ -992,11 +951,7 @@ struct task_struct { unsigned int policy; cpumask_t cpus_allowed; - -#ifdef CONFIG_PREEMPT_RCU - int rcu_read_lock_nesting; - int rcu_flipctr_idx; -#endif /* #ifdef CONFIG_PREEMPT_RCU */ + unsigned int time_slice; #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) struct sched_info sched_info; @@ -1086,11 +1041,6 @@ struct task_struct { /* ipc stuff */ struct sysv_sem sysvsem; #endif -#ifdef 
CONFIG_DETECT_SOFTLOCKUP -/* hung task detection */ - unsigned long last_switch_timestamp; - unsigned long last_switch_count; -#endif /* CPU-specific state of this task */ struct thread_struct thread; /* filesystem information */ @@ -1223,10 +1173,6 @@ struct task_struct { int make_it_fail; #endif struct prop_local_single dirties; -#ifdef CONFIG_LATENCYTOP - int latency_record_count; - struct latency_record latency_record[LT_SAVECOUNT]; -#endif }; /* @@ -1507,12 +1453,6 @@ extern unsigned int sysctl_sched_child_runs_first; extern unsigned int sysctl_sched_features; extern unsigned int sysctl_sched_migration_cost; extern unsigned int sysctl_sched_nr_migrate; -extern unsigned int sysctl_sched_rt_period; -extern unsigned int sysctl_sched_rt_ratio; -#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP) -extern unsigned int sysctl_sched_min_bal_int_shares; -extern unsigned int sysctl_sched_max_bal_int_shares; -#endif int sched_nr_latency_handler(struct ctl_table *table, int write, struct file *file, void __user *buffer, size_t *length, @@ -1905,18 +1845,7 @@ static inline int need_resched(void) * cond_resched_lock() will drop the spinlock before scheduling, * cond_resched_softirq() will enable bhs before scheduling. */ -#ifdef CONFIG_PREEMPT -static inline int cond_resched(void) -{ - return 0; -} -#else -extern int _cond_resched(void); -static inline int cond_resched(void) -{ - return _cond_resched(); -} -#endif +extern int cond_resched(void); extern int cond_resched_lock(spinlock_t * lock); extern int cond_resched_softirq(void); diff --git a/trunk/include/linux/smp_lock.h b/trunk/include/linux/smp_lock.h index aab3a4cff4e1..58962c51dee1 100644 --- a/trunk/include/linux/smp_lock.h +++ b/trunk/include/linux/smp_lock.h @@ -17,10 +17,22 @@ extern void __lockfunc __release_kernel_lock(void); __release_kernel_lock(); \ } while (0) +/* + * Non-SMP kernels will never block on the kernel lock, + * so we are better off returning a constant zero from + * reacquire_kernel_lock() so that the compiler can see + * it at compile-time. + */ +#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL) +# define return_value_on_smp return +#else +# define return_value_on_smp +#endif + static inline int reacquire_kernel_lock(struct task_struct *task) { if (unlikely(task->lock_depth >= 0)) - return __reacquire_kernel_lock(); + return_value_on_smp __reacquire_kernel_lock(); return 0; } diff --git a/trunk/include/linux/stacktrace.h b/trunk/include/linux/stacktrace.h index 5da9794b2d78..e7fa657d0c49 100644 --- a/trunk/include/linux/stacktrace.h +++ b/trunk/include/linux/stacktrace.h @@ -9,13 +9,10 @@ struct stack_trace { }; extern void save_stack_trace(struct stack_trace *trace); -extern void save_stack_trace_tsk(struct task_struct *tsk, - struct stack_trace *trace); extern void print_stack_trace(struct stack_trace *trace, int spaces); #else # define save_stack_trace(trace) do { } while (0) -# define save_stack_trace_tsk(tsk, trace) do { } while (0) # define print_stack_trace(trace, spaces) do { } while (0) #endif diff --git a/trunk/include/linux/topology.h b/trunk/include/linux/topology.h index 2352f46160d3..47729f18bfdf 100644 --- a/trunk/include/linux/topology.h +++ b/trunk/include/linux/topology.h @@ -5,7 +5,7 @@ * * Copyright (C) 2002, IBM Corp. * - * All rights reserved. + * All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -103,7 +103,6 @@ .forkexec_idx = 0, \ .flags = SD_LOAD_BALANCE \ | SD_BALANCE_NEWIDLE \ - | SD_BALANCE_FORK \ | SD_BALANCE_EXEC \ | SD_WAKE_AFFINE \ | SD_WAKE_IDLE \ @@ -135,7 +134,6 @@ .forkexec_idx = 1, \ .flags = SD_LOAD_BALANCE \ | SD_BALANCE_NEWIDLE \ - | SD_BALANCE_FORK \ | SD_BALANCE_EXEC \ | SD_WAKE_AFFINE \ | SD_WAKE_IDLE \ @@ -167,7 +165,6 @@ .forkexec_idx = 1, \ .flags = SD_LOAD_BALANCE \ | SD_BALANCE_NEWIDLE \ - | SD_BALANCE_FORK \ | SD_BALANCE_EXEC \ | SD_WAKE_AFFINE \ | BALANCE_FOR_PKG_POWER,\ diff --git a/trunk/include/media/v4l2-common.h b/trunk/include/media/v4l2-common.h index 181a40c46a52..c019c1e99895 100644 --- a/trunk/include/media/v4l2-common.h +++ b/trunk/include/media/v4l2-common.h @@ -116,6 +116,11 @@ struct v4l2_decode_vbi_line { u32 type; /* VBI service type (V4L2_SLICED_*). 0 if no service found */ }; +struct v4l2_priv_tun_config { + int tuner; + void *priv; +}; + /* audio ioctls */ /* v4l device was opened in Radio mode, to be replaced by VIDIOC_INT_S_TUNER_MODE */ @@ -131,7 +136,7 @@ struct v4l2_decode_vbi_line { #define TUNER_SET_STANDBY _IOW('d', 91, int) /* Sets tda9887 specific stuff, like port1, port2 and qss */ -#define TDA9887_SET_CONFIG _IOW('d', 92, int) +#define TUNER_SET_CONFIG _IOW('d', 92, struct v4l2_priv_tun_config) /* Switch the tuner to a specific tuner mode. Replacement of AUDC_SET_RADIO */ #define VIDIOC_INT_S_TUNER_MODE _IOW('d', 93, enum v4l2_tuner_type) diff --git a/trunk/init/Kconfig b/trunk/init/Kconfig index 0eda68f0ad54..f5becd2a12f6 100644 --- a/trunk/init/Kconfig +++ b/trunk/init/Kconfig @@ -763,31 +763,3 @@ source "block/Kconfig" config PREEMPT_NOTIFIERS bool - -choice - prompt "RCU implementation type:" - default CLASSIC_RCU - -config CLASSIC_RCU - bool "Classic RCU" - help - This option selects the classic RCU implementation that is - designed for best read-side performance on non-realtime - systems. - - Say Y if you are unsure. - -config PREEMPT_RCU - bool "Preemptible RCU" - depends on PREEMPT - help - This option reduces the latency of the kernel by making certain - RCU sections preemptible. Normally RCU code is non-preemptible, if - this option is selected then read-only RCU sections become - preemptible. This helps latency, but may expose bugs due to - now-naive assumptions about each RCU read-side critical section - remaining on a given CPU through its execution. - - Say N if you are unsure. 
- -endchoice diff --git a/trunk/init/main.c b/trunk/init/main.c index f287ca5862b9..80b04b6c5157 100644 --- a/trunk/init/main.c +++ b/trunk/init/main.c @@ -607,7 +607,6 @@ asmlinkage void __init start_kernel(void) vfs_caches_init_early(); cpuset_init_early(); mem_init(); - cpu_hotplug_init(); kmem_cache_init(); setup_per_cpu_pageset(); numa_policy_init(); diff --git a/trunk/kernel/Kconfig.hz b/trunk/kernel/Kconfig.hz index 526128a2e622..4af15802ccd4 100644 --- a/trunk/kernel/Kconfig.hz +++ b/trunk/kernel/Kconfig.hz @@ -54,5 +54,3 @@ config HZ default 300 if HZ_300 default 1000 if HZ_1000 -config SCHED_HRTICK - def_bool HIGH_RES_TIMERS && X86 diff --git a/trunk/kernel/Kconfig.preempt b/trunk/kernel/Kconfig.preempt index 0669b70fa6a3..c64ce9c14207 100644 --- a/trunk/kernel/Kconfig.preempt +++ b/trunk/kernel/Kconfig.preempt @@ -52,13 +52,14 @@ config PREEMPT endchoice -config RCU_TRACE - bool "Enable tracing for RCU - currently stats in debugfs" - select DEBUG_FS +config PREEMPT_BKL + bool "Preempt The Big Kernel Lock" + depends on SMP || PREEMPT default y help - This option provides tracing in RCU which presents stats - in debugfs for debugging RCU implementation. + This option reduces the latency of the kernel by making the + big kernel lock preemptible. - Say Y here if you want to enable RCU tracing + Say Y here if you are building a kernel for a desktop system. Say N if you are unsure. + diff --git a/trunk/kernel/Makefile b/trunk/kernel/Makefile index 390d42146267..dfa96956dae0 100644 --- a/trunk/kernel/Makefile +++ b/trunk/kernel/Makefile @@ -52,17 +52,11 @@ obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o obj-$(CONFIG_GENERIC_HARDIRQS) += irq/ obj-$(CONFIG_SECCOMP) += seccomp.o obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o -obj-$(CONFIG_CLASSIC_RCU) += rcuclassic.o -obj-$(CONFIG_PREEMPT_RCU) += rcupreempt.o -ifeq ($(CONFIG_PREEMPT_RCU),y) -obj-$(CONFIG_RCU_TRACE) += rcupreempt_trace.o -endif obj-$(CONFIG_RELAY) += relay.o obj-$(CONFIG_SYSCTL) += utsname_sysctl.o obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o obj-$(CONFIG_MARKERS) += marker.o -obj-$(CONFIG_LATENCYTOP) += latencytop.o ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y) # According to Alan Modra , the -fno-omit-frame-pointer is diff --git a/trunk/kernel/cpu.c b/trunk/kernel/cpu.c index e0d3a4f56ecb..6b3a0c15144f 100644 --- a/trunk/kernel/cpu.c +++ b/trunk/kernel/cpu.c @@ -15,8 +15,9 @@ #include #include -/* Serializes the updates to cpu_online_map, cpu_present_map */ +/* This protects CPUs going up and down... */ static DEFINE_MUTEX(cpu_add_remove_lock); +static DEFINE_MUTEX(cpu_bitmask_lock); static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain); @@ -25,123 +26,52 @@ static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain); */ static int cpu_hotplug_disabled; -static struct { - struct task_struct *active_writer; - struct mutex lock; /* Synchronizes accesses to refcount, */ - /* - * Also blocks the new readers during - * an ongoing cpu hotplug operation. - */ - int refcount; - wait_queue_head_t writer_queue; -} cpu_hotplug; - -#define writer_exists() (cpu_hotplug.active_writer != NULL) - -void __init cpu_hotplug_init(void) -{ - cpu_hotplug.active_writer = NULL; - mutex_init(&cpu_hotplug.lock); - cpu_hotplug.refcount = 0; - init_waitqueue_head(&cpu_hotplug.writer_queue); -} - #ifdef CONFIG_HOTPLUG_CPU -void get_online_cpus(void) +/* Crappy recursive lock-takers in cpufreq! 
Complain loudly about idiots */ +static struct task_struct *recursive; +static int recursive_depth; + +void lock_cpu_hotplug(void) { - might_sleep(); - if (cpu_hotplug.active_writer == current) + struct task_struct *tsk = current; + + if (tsk == recursive) { + static int warnings = 10; + if (warnings) { + printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n"); + WARN_ON(1); + warnings--; + } + recursive_depth++; return; - mutex_lock(&cpu_hotplug.lock); - cpu_hotplug.refcount++; - mutex_unlock(&cpu_hotplug.lock); - + } + mutex_lock(&cpu_bitmask_lock); + recursive = tsk; } -EXPORT_SYMBOL_GPL(get_online_cpus); +EXPORT_SYMBOL_GPL(lock_cpu_hotplug); -void put_online_cpus(void) +void unlock_cpu_hotplug(void) { - if (cpu_hotplug.active_writer == current) + WARN_ON(recursive != current); + if (recursive_depth) { + recursive_depth--; return; - mutex_lock(&cpu_hotplug.lock); - cpu_hotplug.refcount--; - - if (unlikely(writer_exists()) && !cpu_hotplug.refcount) - wake_up(&cpu_hotplug.writer_queue); - - mutex_unlock(&cpu_hotplug.lock); - + } + recursive = NULL; + mutex_unlock(&cpu_bitmask_lock); } -EXPORT_SYMBOL_GPL(put_online_cpus); +EXPORT_SYMBOL_GPL(unlock_cpu_hotplug); #endif /* CONFIG_HOTPLUG_CPU */ -/* - * The following two API's must be used when attempting - * to serialize the updates to cpu_online_map, cpu_present_map. - */ -void cpu_maps_update_begin(void) -{ - mutex_lock(&cpu_add_remove_lock); -} - -void cpu_maps_update_done(void) -{ - mutex_unlock(&cpu_add_remove_lock); -} - -/* - * This ensures that the hotplug operation can begin only when the - * refcount goes to zero. - * - * Note that during a cpu-hotplug operation, the new readers, if any, - * will be blocked by the cpu_hotplug.lock - * - * Since cpu_maps_update_begin is always called after invoking - * cpu_maps_update_begin, we can be sure that only one writer is active. - * - * Note that theoretically, there is a possibility of a livelock: - * - Refcount goes to zero, last reader wakes up the sleeping - * writer. - * - Last reader unlocks the cpu_hotplug.lock. - * - A new reader arrives at this moment, bumps up the refcount. - * - The writer acquires the cpu_hotplug.lock finds the refcount - * non zero and goes to sleep again. - * - * However, this is very difficult to achieve in practice since - * get_online_cpus() not an api which is called all that often. - * - */ -static void cpu_hotplug_begin(void) -{ - DECLARE_WAITQUEUE(wait, current); - - mutex_lock(&cpu_hotplug.lock); - - cpu_hotplug.active_writer = current; - add_wait_queue_exclusive(&cpu_hotplug.writer_queue, &wait); - while (cpu_hotplug.refcount) { - set_current_state(TASK_UNINTERRUPTIBLE); - mutex_unlock(&cpu_hotplug.lock); - schedule(); - mutex_lock(&cpu_hotplug.lock); - } - remove_wait_queue_locked(&cpu_hotplug.writer_queue, &wait); -} - -static void cpu_hotplug_done(void) -{ - cpu_hotplug.active_writer = NULL; - mutex_unlock(&cpu_hotplug.lock); -} /* Need to know about CPUs going up/down? 
*/ int __cpuinit register_cpu_notifier(struct notifier_block *nb) { int ret; - cpu_maps_update_begin(); + mutex_lock(&cpu_add_remove_lock); ret = raw_notifier_chain_register(&cpu_chain, nb); - cpu_maps_update_done(); + mutex_unlock(&cpu_add_remove_lock); return ret; } @@ -151,9 +81,9 @@ EXPORT_SYMBOL(register_cpu_notifier); void unregister_cpu_notifier(struct notifier_block *nb) { - cpu_maps_update_begin(); + mutex_lock(&cpu_add_remove_lock); raw_notifier_chain_unregister(&cpu_chain, nb); - cpu_maps_update_done(); + mutex_unlock(&cpu_add_remove_lock); } EXPORT_SYMBOL(unregister_cpu_notifier); @@ -217,7 +147,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen) if (!cpu_online(cpu)) return -EINVAL; - cpu_hotplug_begin(); + raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu); err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); if (err == NOTIFY_BAD) { @@ -236,7 +166,9 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen) cpu_clear(cpu, tmp); set_cpus_allowed(current, tmp); + mutex_lock(&cpu_bitmask_lock); p = __stop_machine_run(take_cpu_down, &tcd_param, cpu); + mutex_unlock(&cpu_bitmask_lock); if (IS_ERR(p) || cpu_online(cpu)) { /* CPU didn't die: tell everyone. Can't complain. */ @@ -270,7 +202,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen) out_allowed: set_cpus_allowed(current, old_allowed); out_release: - cpu_hotplug_done(); + raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu); return err; } @@ -278,13 +210,13 @@ int cpu_down(unsigned int cpu) { int err = 0; - cpu_maps_update_begin(); + mutex_lock(&cpu_add_remove_lock); if (cpu_hotplug_disabled) err = -EBUSY; else err = _cpu_down(cpu, 0); - cpu_maps_update_done(); + mutex_unlock(&cpu_add_remove_lock); return err; } #endif /*CONFIG_HOTPLUG_CPU*/ @@ -299,7 +231,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) if (cpu_online(cpu) || !cpu_present(cpu)) return -EINVAL; - cpu_hotplug_begin(); + raw_notifier_call_chain(&cpu_chain, CPU_LOCK_ACQUIRE, hcpu); ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls); if (ret == NOTIFY_BAD) { @@ -311,7 +243,9 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) } /* Arch-specific enabling code. 
*/ + mutex_lock(&cpu_bitmask_lock); ret = __cpu_up(cpu); + mutex_unlock(&cpu_bitmask_lock); if (ret != 0) goto out_notify; BUG_ON(!cpu_online(cpu)); @@ -323,7 +257,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) if (ret != 0) __raw_notifier_call_chain(&cpu_chain, CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL); - cpu_hotplug_done(); + raw_notifier_call_chain(&cpu_chain, CPU_LOCK_RELEASE, hcpu); return ret; } @@ -341,13 +275,13 @@ int __cpuinit cpu_up(unsigned int cpu) return -EINVAL; } - cpu_maps_update_begin(); + mutex_lock(&cpu_add_remove_lock); if (cpu_hotplug_disabled) err = -EBUSY; else err = _cpu_up(cpu, 0); - cpu_maps_update_done(); + mutex_unlock(&cpu_add_remove_lock); return err; } @@ -358,7 +292,7 @@ int disable_nonboot_cpus(void) { int cpu, first_cpu, error = 0; - cpu_maps_update_begin(); + mutex_lock(&cpu_add_remove_lock); first_cpu = first_cpu(cpu_online_map); /* We take down all of the non-boot CPUs in one shot to avoid races * with the userspace trying to use the CPU hotplug at the same time @@ -385,7 +319,7 @@ int disable_nonboot_cpus(void) } else { printk(KERN_ERR "Non-boot CPUs are not disabled\n"); } - cpu_maps_update_done(); + mutex_unlock(&cpu_add_remove_lock); return error; } @@ -394,7 +328,7 @@ void enable_nonboot_cpus(void) int cpu, error; /* Allow everyone to use the CPU hotplug again */ - cpu_maps_update_begin(); + mutex_lock(&cpu_add_remove_lock); cpu_hotplug_disabled = 0; if (cpus_empty(frozen_cpus)) goto out; @@ -410,6 +344,6 @@ void enable_nonboot_cpus(void) } cpus_clear(frozen_cpus); out: - cpu_maps_update_done(); + mutex_unlock(&cpu_add_remove_lock); } #endif /* CONFIG_PM_SLEEP_SMP */ diff --git a/trunk/kernel/cpuset.c b/trunk/kernel/cpuset.c index cfaf6419d817..50f5dc463688 100644 --- a/trunk/kernel/cpuset.c +++ b/trunk/kernel/cpuset.c @@ -537,10 +537,10 @@ static int cpusets_overlap(struct cpuset *a, struct cpuset *b) * * Call with cgroup_mutex held. May take callback_mutex during * call due to the kfifo_alloc() and kmalloc() calls. May nest - * a call to the get_online_cpus()/put_online_cpus() pair. + * a call to the lock_cpu_hotplug()/unlock_cpu_hotplug() pair. * Must not be called holding callback_mutex, because we must not - * call get_online_cpus() while holding callback_mutex. Elsewhere - * the kernel nests callback_mutex inside get_online_cpus() calls. + * call lock_cpu_hotplug() while holding callback_mutex. Elsewhere + * the kernel nests callback_mutex inside lock_cpu_hotplug() calls. * So the reverse nesting would risk an ABBA deadlock. * * The three key local variables below are: @@ -691,9 +691,9 @@ static void rebuild_sched_domains(void) rebuild: /* Have scheduler rebuild sched domains */ - get_online_cpus(); + lock_cpu_hotplug(); partition_sched_domains(ndoms, doms); - put_online_cpus(); + unlock_cpu_hotplug(); done: if (q && !IS_ERR(q)) @@ -1617,10 +1617,10 @@ static struct cgroup_subsys_state *cpuset_create( * * If the cpuset being removed has its flag 'sched_load_balance' * enabled, then simulate turning sched_load_balance off, which - * will call rebuild_sched_domains(). The get_online_cpus() + * will call rebuild_sched_domains(). The lock_cpu_hotplug() * call in rebuild_sched_domains() must not be made while holding * callback_mutex. Elsewhere the kernel nests callback_mutex inside - * get_online_cpus() calls. So the reverse nesting would risk an + * lock_cpu_hotplug() calls. So the reverse nesting would risk an * ABBA deadlock. 
*/ diff --git a/trunk/kernel/fork.c b/trunk/kernel/fork.c index 39d22b3357de..8dd8ff281009 100644 --- a/trunk/kernel/fork.c +++ b/trunk/kernel/fork.c @@ -1045,10 +1045,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, copy_flags(clone_flags, p); INIT_LIST_HEAD(&p->children); INIT_LIST_HEAD(&p->sibling); -#ifdef CONFIG_PREEMPT_RCU - p->rcu_read_lock_nesting = 0; - p->rcu_flipctr_idx = 0; -#endif /* #ifdef CONFIG_PREEMPT_RCU */ p->vfork_done = NULL; spin_lock_init(&p->alloc_lock); @@ -1063,11 +1059,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->prev_utime = cputime_zero; p->prev_stime = cputime_zero; -#ifdef CONFIG_DETECT_SOFTLOCKUP - p->last_switch_count = 0; - p->last_switch_timestamp = 0; -#endif - #ifdef CONFIG_TASK_XACCT p->rchar = 0; /* I/O counter: bytes read */ p->wchar = 0; /* I/O counter: bytes written */ @@ -1205,7 +1196,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, #ifdef TIF_SYSCALL_EMU clear_tsk_thread_flag(p, TIF_SYSCALL_EMU); #endif - clear_all_latency_tracing(p); /* Our parent execution domain becomes current domain These must match for thread signalling to apply */ @@ -1247,7 +1237,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, * parent's CPU). This avoids alot of nasty races. */ p->cpus_allowed = current->cpus_allowed; - p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed; if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) || !cpu_online(task_cpu(p)))) set_task_cpu(p, smp_processor_id()); diff --git a/trunk/kernel/hrtimer.c b/trunk/kernel/hrtimer.c index bd5d6b5060bc..f994bb8065e6 100644 --- a/trunk/kernel/hrtimer.c +++ b/trunk/kernel/hrtimer.c @@ -325,22 +325,6 @@ unsigned long ktime_divns(const ktime_t kt, s64 div) } #endif /* BITS_PER_LONG >= 64 */ -/* - * Check, whether the timer is on the callback pending list - */ -static inline int hrtimer_cb_pending(const struct hrtimer *timer) -{ - return timer->state & HRTIMER_STATE_PENDING; -} - -/* - * Remove a timer from the callback pending list - */ -static inline void hrtimer_remove_cb_pending(struct hrtimer *timer) -{ - list_del_init(&timer->cb_entry); -} - /* High resolution timer related functions */ #ifdef CONFIG_HIGH_RES_TIMERS @@ -509,6 +493,22 @@ void hres_timers_resume(void) retrigger_next_event(NULL); } +/* + * Check, whether the timer is on the callback pending list + */ +static inline int hrtimer_cb_pending(const struct hrtimer *timer) +{ + return timer->state & HRTIMER_STATE_PENDING; +} + +/* + * Remove a timer from the callback pending list + */ +static inline void hrtimer_remove_cb_pending(struct hrtimer *timer) +{ + list_del_init(&timer->cb_entry); +} + /* * Initialize the high resolution related parts of cpu_base */ @@ -516,6 +516,7 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { base->expires_next.tv64 = KTIME_MAX; base->hres_active = 0; + INIT_LIST_HEAD(&base->cb_pending); } /* @@ -523,6 +524,7 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) */ static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { + INIT_LIST_HEAD(&timer->cb_entry); } /* @@ -616,13 +618,10 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, { return 0; } +static inline int hrtimer_cb_pending(struct hrtimer *timer) { return 0; } +static inline void hrtimer_remove_cb_pending(struct hrtimer *timer) { } static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { } -static inline int 
hrtimer_reprogram(struct hrtimer *timer, - struct hrtimer_clock_base *base) -{ - return 0; -} #endif /* CONFIG_HIGH_RES_TIMERS */ @@ -1002,7 +1001,6 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, clock_id = CLOCK_MONOTONIC; timer->base = &cpu_base->clock_base[clock_id]; - INIT_LIST_HEAD(&timer->cb_entry); hrtimer_init_timer_hres(timer); #ifdef CONFIG_TIMER_STATS @@ -1032,85 +1030,6 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) } EXPORT_SYMBOL_GPL(hrtimer_get_res); -static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base) -{ - spin_lock_irq(&cpu_base->lock); - - while (!list_empty(&cpu_base->cb_pending)) { - enum hrtimer_restart (*fn)(struct hrtimer *); - struct hrtimer *timer; - int restart; - - timer = list_entry(cpu_base->cb_pending.next, - struct hrtimer, cb_entry); - - timer_stats_account_hrtimer(timer); - - fn = timer->function; - __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0); - spin_unlock_irq(&cpu_base->lock); - - restart = fn(timer); - - spin_lock_irq(&cpu_base->lock); - - timer->state &= ~HRTIMER_STATE_CALLBACK; - if (restart == HRTIMER_RESTART) { - BUG_ON(hrtimer_active(timer)); - /* - * Enqueue the timer, allow reprogramming of the event - * device - */ - enqueue_hrtimer(timer, timer->base, 1); - } else if (hrtimer_active(timer)) { - /* - * If the timer was rearmed on another CPU, reprogram - * the event device. - */ - if (timer->base->first == &timer->node) - hrtimer_reprogram(timer, timer->base); - } - } - spin_unlock_irq(&cpu_base->lock); -} - -static void __run_hrtimer(struct hrtimer *timer) -{ - struct hrtimer_clock_base *base = timer->base; - struct hrtimer_cpu_base *cpu_base = base->cpu_base; - enum hrtimer_restart (*fn)(struct hrtimer *); - int restart; - - __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); - timer_stats_account_hrtimer(timer); - - fn = timer->function; - if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) { - /* - * Used for scheduler timers, avoid lock inversion with - * rq->lock and tasklist_lock. - * - * These timers are required to deal with enqueue expiry - * themselves and are not allowed to migrate. - */ - spin_unlock(&cpu_base->lock); - restart = fn(timer); - spin_lock(&cpu_base->lock); - } else - restart = fn(timer); - - /* - * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid - * reprogramming of the event hardware. This happens at the end of this - * function anyway. - */ - if (restart != HRTIMER_NORESTART) { - BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); - enqueue_hrtimer(timer, base, 0); - } - timer->state &= ~HRTIMER_STATE_CALLBACK; -} - #ifdef CONFIG_HIGH_RES_TIMERS /* @@ -1168,7 +1087,21 @@ void hrtimer_interrupt(struct clock_event_device *dev) continue; } - __run_hrtimer(timer); + __remove_hrtimer(timer, base, + HRTIMER_STATE_CALLBACK, 0); + timer_stats_account_hrtimer(timer); + + /* + * Note: We clear the CALLBACK bit after + * enqueue_hrtimer to avoid reprogramming of + * the event hardware. This happens at the end + * of this function anyway. 
+ */ + if (timer->function(timer) != HRTIMER_NORESTART) { + BUG_ON(timer->state != HRTIMER_STATE_CALLBACK); + enqueue_hrtimer(timer, base, 0); + } + timer->state &= ~HRTIMER_STATE_CALLBACK; } spin_unlock(&cpu_base->lock); base++; @@ -1189,41 +1122,52 @@ void hrtimer_interrupt(struct clock_event_device *dev) static void run_hrtimer_softirq(struct softirq_action *h) { - run_hrtimer_pending(&__get_cpu_var(hrtimer_bases)); -} + struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); -#endif /* CONFIG_HIGH_RES_TIMERS */ + spin_lock_irq(&cpu_base->lock); -/* - * Called from timer softirq every jiffy, expire hrtimers: - * - * For HRT its the fall back code to run the softirq in the timer - * softirq context in case the hrtimer initialization failed or has - * not been done yet. - */ -void hrtimer_run_pending(void) -{ - struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); + while (!list_empty(&cpu_base->cb_pending)) { + enum hrtimer_restart (*fn)(struct hrtimer *); + struct hrtimer *timer; + int restart; - if (hrtimer_hres_active()) - return; + timer = list_entry(cpu_base->cb_pending.next, + struct hrtimer, cb_entry); - /* - * This _is_ ugly: We have to check in the softirq context, - * whether we can switch to highres and / or nohz mode. The - * clocksource switch happens in the timer interrupt with - * xtime_lock held. Notification from there only sets the - * check bit in the tick_oneshot code, otherwise we might - * deadlock vs. xtime_lock. - */ - if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) - hrtimer_switch_to_hres(); + timer_stats_account_hrtimer(timer); + + fn = timer->function; + __remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0); + spin_unlock_irq(&cpu_base->lock); + + restart = fn(timer); + + spin_lock_irq(&cpu_base->lock); - run_hrtimer_pending(cpu_base); + timer->state &= ~HRTIMER_STATE_CALLBACK; + if (restart == HRTIMER_RESTART) { + BUG_ON(hrtimer_active(timer)); + /* + * Enqueue the timer, allow reprogramming of the event + * device + */ + enqueue_hrtimer(timer, timer->base, 1); + } else if (hrtimer_active(timer)) { + /* + * If the timer was rearmed on another CPU, reprogram + * the event device. 
+ */ + if (timer->base->first == &timer->node) + hrtimer_reprogram(timer, timer->base); + } + } + spin_unlock_irq(&cpu_base->lock); } +#endif /* CONFIG_HIGH_RES_TIMERS */ + /* - * Called from hardirq context every jiffy + * Expire the per base hrtimer-queue: */ static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base, int index) @@ -1237,27 +1181,46 @@ static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base, if (base->get_softirq_time) base->softirq_time = base->get_softirq_time(); - spin_lock(&cpu_base->lock); + spin_lock_irq(&cpu_base->lock); while ((node = base->first)) { struct hrtimer *timer; + enum hrtimer_restart (*fn)(struct hrtimer *); + int restart; timer = rb_entry(node, struct hrtimer, node); if (base->softirq_time.tv64 <= timer->expires.tv64) break; - if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) { - __remove_hrtimer(timer, base, HRTIMER_STATE_PENDING, 0); - list_add_tail(&timer->cb_entry, - &base->cpu_base->cb_pending); - continue; - } +#ifdef CONFIG_HIGH_RES_TIMERS + WARN_ON_ONCE(timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ); +#endif + timer_stats_account_hrtimer(timer); + + fn = timer->function; + __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0); + spin_unlock_irq(&cpu_base->lock); + + restart = fn(timer); - __run_hrtimer(timer); + spin_lock_irq(&cpu_base->lock); + + timer->state &= ~HRTIMER_STATE_CALLBACK; + if (restart != HRTIMER_NORESTART) { + BUG_ON(hrtimer_active(timer)); + enqueue_hrtimer(timer, base, 0); + } } - spin_unlock(&cpu_base->lock); + spin_unlock_irq(&cpu_base->lock); } +/* + * Called from timer softirq every jiffy, expire hrtimers: + * + * For HRT its the fall back code to run the softirq in the timer + * softirq context in case the hrtimer initialization failed or has + * not been done yet. + */ void hrtimer_run_queues(void) { struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); @@ -1266,6 +1229,18 @@ void hrtimer_run_queues(void) if (hrtimer_hres_active()) return; + /* + * This _is_ ugly: We have to check in the softirq context, + * whether we can switch to highres and / or nohz mode. The + * clocksource switch happens in the timer interrupt with + * xtime_lock held. Notification from there only sets the + * check bit in the tick_oneshot code, otherwise we might + * deadlock vs. xtime_lock. 
+ */ + if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) + if (hrtimer_switch_to_hres()) + return; + hrtimer_get_softirq_time(cpu_base); for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) @@ -1293,7 +1268,7 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) sl->timer.function = hrtimer_wakeup; sl->task = task; #ifdef CONFIG_HIGH_RES_TIMERS - sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; + sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_RESTART; #endif } @@ -1304,8 +1279,6 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod do { set_current_state(TASK_INTERRUPTIBLE); hrtimer_start(&t->timer, t->timer.expires, mode); - if (!hrtimer_active(&t->timer)) - t->task = NULL; if (likely(t->task)) schedule(); @@ -1416,7 +1389,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu) for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) cpu_base->clock_base[i].cpu_base = cpu_base; - INIT_LIST_HEAD(&cpu_base->cb_pending); hrtimer_init_hres(cpu_base); } diff --git a/trunk/kernel/kthread.c b/trunk/kernel/kthread.c index 0ac887882f90..dcfe724300eb 100644 --- a/trunk/kernel/kthread.c +++ b/trunk/kernel/kthread.c @@ -15,8 +15,6 @@ #include #include -#define KTHREAD_NICE_LEVEL (-5) - static DEFINE_SPINLOCK(kthread_create_lock); static LIST_HEAD(kthread_create_list); struct task_struct *kthreadd_task; @@ -96,18 +94,10 @@ static void create_kthread(struct kthread_create_info *create) if (pid < 0) { create->result = ERR_PTR(pid); } else { - struct sched_param param = { .sched_priority = 0 }; wait_for_completion(&create->started); read_lock(&tasklist_lock); create->result = find_task_by_pid(pid); read_unlock(&tasklist_lock); - /* - * root may have changed our (kthreadd's) priority or CPU mask. - * The kernel thread should not inherit these properties. - */ - sched_setscheduler(create->result, SCHED_NORMAL, ¶m); - set_user_nice(create->result, KTHREAD_NICE_LEVEL); - set_cpus_allowed(create->result, CPU_MASK_ALL); } complete(&create->done); } @@ -231,7 +221,7 @@ int kthreadd(void *unused) /* Setup a clean context for our children to inherit. */ set_task_comm(tsk, "kthreadd"); ignore_signals(tsk); - set_user_nice(tsk, KTHREAD_NICE_LEVEL); + set_user_nice(tsk, -5); set_cpus_allowed(tsk, CPU_MASK_ALL); current->flags |= PF_NOFREEZE; diff --git a/trunk/kernel/latencytop.c b/trunk/kernel/latencytop.c deleted file mode 100644 index b4e3c85abe74..000000000000 --- a/trunk/kernel/latencytop.c +++ /dev/null @@ -1,239 +0,0 @@ -/* - * latencytop.c: Latency display infrastructure - * - * (C) Copyright 2008 Intel Corporation - * Author: Arjan van de Ven - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; version 2 - * of the License. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static DEFINE_SPINLOCK(latency_lock); - -#define MAXLR 128 -static struct latency_record latency_record[MAXLR]; - -int latencytop_enabled; - -void clear_all_latency_tracing(struct task_struct *p) -{ - unsigned long flags; - - if (!latencytop_enabled) - return; - - spin_lock_irqsave(&latency_lock, flags); - memset(&p->latency_record, 0, sizeof(p->latency_record)); - p->latency_record_count = 0; - spin_unlock_irqrestore(&latency_lock, flags); -} - -static void clear_global_latency_tracing(void) -{ - unsigned long flags; - - spin_lock_irqsave(&latency_lock, flags); - memset(&latency_record, 0, sizeof(latency_record)); - spin_unlock_irqrestore(&latency_lock, flags); -} - -static void __sched -account_global_scheduler_latency(struct task_struct *tsk, struct latency_record *lat) -{ - int firstnonnull = MAXLR + 1; - int i; - - if (!latencytop_enabled) - return; - - /* skip kernel threads for now */ - if (!tsk->mm) - return; - - for (i = 0; i < MAXLR; i++) { - int q; - int same = 1; - /* Nothing stored: */ - if (!latency_record[i].backtrace[0]) { - if (firstnonnull > i) - firstnonnull = i; - continue; - } - for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) { - if (latency_record[i].backtrace[q] != - lat->backtrace[q]) - same = 0; - if (same && lat->backtrace[q] == 0) - break; - if (same && lat->backtrace[q] == ULONG_MAX) - break; - } - if (same) { - latency_record[i].count++; - latency_record[i].time += lat->time; - if (lat->time > latency_record[i].max) - latency_record[i].max = lat->time; - return; - } - } - - i = firstnonnull; - if (i >= MAXLR - 1) - return; - - /* Allocted a new one: */ - memcpy(&latency_record[i], lat, sizeof(struct latency_record)); -} - -static inline void store_stacktrace(struct task_struct *tsk, struct latency_record *lat) -{ - struct stack_trace trace; - - memset(&trace, 0, sizeof(trace)); - trace.max_entries = LT_BACKTRACEDEPTH; - trace.entries = &lat->backtrace[0]; - trace.skip = 0; - save_stack_trace_tsk(tsk, &trace); -} - -void __sched -account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) -{ - unsigned long flags; - int i, q; - struct latency_record lat; - - if (!latencytop_enabled) - return; - - /* Long interruptible waits are generally user requested... 
*/ - if (inter && usecs > 5000) - return; - - memset(&lat, 0, sizeof(lat)); - lat.count = 1; - lat.time = usecs; - lat.max = usecs; - store_stacktrace(tsk, &lat); - - spin_lock_irqsave(&latency_lock, flags); - - account_global_scheduler_latency(tsk, &lat); - - /* - * short term hack; if we're > 32 we stop; future we recycle: - */ - tsk->latency_record_count++; - if (tsk->latency_record_count >= LT_SAVECOUNT) - goto out_unlock; - - for (i = 0; i < LT_SAVECOUNT ; i++) { - struct latency_record *mylat; - int same = 1; - mylat = &tsk->latency_record[i]; - for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) { - if (mylat->backtrace[q] != - lat.backtrace[q]) - same = 0; - if (same && lat.backtrace[q] == 0) - break; - if (same && lat.backtrace[q] == ULONG_MAX) - break; - } - if (same) { - mylat->count++; - mylat->time += lat.time; - if (lat.time > mylat->max) - mylat->max = lat.time; - goto out_unlock; - } - } - - /* Allocated a new one: */ - i = tsk->latency_record_count; - memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record)); - -out_unlock: - spin_unlock_irqrestore(&latency_lock, flags); -} - -static int lstats_show(struct seq_file *m, void *v) -{ - int i; - - seq_puts(m, "Latency Top version : v0.1\n"); - - for (i = 0; i < MAXLR; i++) { - if (latency_record[i].backtrace[0]) { - int q; - seq_printf(m, "%i %li %li ", - latency_record[i].count, - latency_record[i].time, - latency_record[i].max); - for (q = 0; q < LT_BACKTRACEDEPTH; q++) { - char sym[KSYM_NAME_LEN]; - char *c; - if (!latency_record[i].backtrace[q]) - break; - if (latency_record[i].backtrace[q] == ULONG_MAX) - break; - sprint_symbol(sym, latency_record[i].backtrace[q]); - c = strchr(sym, '+'); - if (c) - *c = 0; - seq_printf(m, "%s ", sym); - } - seq_printf(m, "\n"); - } - } - return 0; -} - -static ssize_t -lstats_write(struct file *file, const char __user *buf, size_t count, - loff_t *offs) -{ - clear_global_latency_tracing(); - - return count; -} - -static int lstats_open(struct inode *inode, struct file *filp) -{ - return single_open(filp, lstats_show, NULL); -} - -static struct file_operations lstats_fops = { - .open = lstats_open, - .read = seq_read, - .write = lstats_write, - .llseek = seq_lseek, - .release = single_release, -}; - -static int __init init_lstats_procfs(void) -{ - struct proc_dir_entry *pe; - - pe = create_proc_entry("latency_stats", 0644, NULL); - if (!pe) - return -ENOMEM; - - pe->proc_fops = &lstats_fops; - - return 0; -} -__initcall(init_lstats_procfs); diff --git a/trunk/kernel/lockdep.c b/trunk/kernel/lockdep.c index 3574379f4d62..e2c07ece367d 100644 --- a/trunk/kernel/lockdep.c +++ b/trunk/kernel/lockdep.c @@ -3206,11 +3206,7 @@ void debug_show_all_locks(void) EXPORT_SYMBOL_GPL(debug_show_all_locks); -/* - * Careful: only use this function if you are sure that - * the task cannot run in parallel! 
- */ -void __debug_show_held_locks(struct task_struct *task) +void debug_show_held_locks(struct task_struct *task) { if (unlikely(!debug_locks)) { printk("INFO: lockdep is turned off.\n"); @@ -3218,12 +3214,6 @@ void __debug_show_held_locks(struct task_struct *task) } lockdep_print_held_locks(task); } -EXPORT_SYMBOL_GPL(__debug_show_held_locks); - -void debug_show_held_locks(struct task_struct *task) -{ - __debug_show_held_locks(task); -} EXPORT_SYMBOL_GPL(debug_show_held_locks); diff --git a/trunk/kernel/module.c b/trunk/kernel/module.c index 1bb4c5e0d56e..dcb8a2cbf75e 100644 --- a/trunk/kernel/module.c +++ b/trunk/kernel/module.c @@ -496,8 +496,6 @@ static struct module_attribute modinfo_##field = { \ MODINFO_ATTR(version); MODINFO_ATTR(srcversion); -static char last_unloaded_module[MODULE_NAME_LEN+1]; - #ifdef CONFIG_MODULE_UNLOAD /* Init the unload section of the module. */ static void module_unload_init(struct module *mod) @@ -721,8 +719,6 @@ sys_delete_module(const char __user *name_user, unsigned int flags) mod->exit(); mutex_lock(&module_mutex); } - /* Store the name of the last unloaded module for diagnostic purposes */ - sprintf(last_unloaded_module, mod->name); free_module(mod); out: @@ -2361,30 +2357,21 @@ static void m_stop(struct seq_file *m, void *p) mutex_unlock(&module_mutex); } -static char *module_flags(struct module *mod, char *buf) +static char *taint_flags(unsigned int taints, char *buf) { int bx = 0; - if (mod->taints || - mod->state == MODULE_STATE_GOING || - mod->state == MODULE_STATE_COMING) { + if (taints) { buf[bx++] = '('; - if (mod->taints & TAINT_PROPRIETARY_MODULE) + if (taints & TAINT_PROPRIETARY_MODULE) buf[bx++] = 'P'; - if (mod->taints & TAINT_FORCED_MODULE) + if (taints & TAINT_FORCED_MODULE) buf[bx++] = 'F'; /* * TAINT_FORCED_RMMOD: could be added. * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't * apply to modules. */ - - /* Show a - for module-is-being-unloaded */ - if (mod->state == MODULE_STATE_GOING) - buf[bx++] = '-'; - /* Show a + for module-is-being-loaded */ - if (mod->state == MODULE_STATE_COMING) - buf[bx++] = '+'; buf[bx++] = ')'; } buf[bx] = '\0'; @@ -2411,7 +2398,7 @@ static int m_show(struct seq_file *m, void *p) /* Taints info */ if (mod->taints) - seq_printf(m, " %s", module_flags(mod, buf)); + seq_printf(m, " %s", taint_flags(mod->taints, buf)); seq_printf(m, "\n"); return 0; @@ -2506,9 +2493,7 @@ void print_modules(void) printk("Modules linked in:"); list_for_each_entry(mod, &modules, list) - printk(" %s%s", mod->name, module_flags(mod, buf)); - if (last_unloaded_module[0]) - printk(" [last unloaded: %s]", last_unloaded_module); + printk(" %s%s", mod->name, taint_flags(mod->taints, buf)); printk("\n"); } diff --git a/trunk/kernel/posix-cpu-timers.c b/trunk/kernel/posix-cpu-timers.c index 0b7c82ac467e..68c96376e84a 100644 --- a/trunk/kernel/posix-cpu-timers.c +++ b/trunk/kernel/posix-cpu-timers.c @@ -967,7 +967,6 @@ static void check_thread_timers(struct task_struct *tsk, { int maxfire; struct list_head *timers = tsk->cpu_timers; - struct signal_struct *const sig = tsk->signal; maxfire = 20; tsk->it_prof_expires = cputime_zero; @@ -1012,35 +1011,6 @@ static void check_thread_timers(struct task_struct *tsk, t->firing = 1; list_move_tail(&t->entry, firing); } - - /* - * Check for the special case thread timers. 
- */ - if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) { - unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max; - unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur; - - if (hard != RLIM_INFINITY && - tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) { - /* - * At the hard limit, we just die. - * No need to calculate anything else now. - */ - __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); - return; - } - if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) { - /* - * At the soft limit, send a SIGXCPU every second. - */ - if (sig->rlim[RLIMIT_RTTIME].rlim_cur - < sig->rlim[RLIMIT_RTTIME].rlim_max) { - sig->rlim[RLIMIT_RTTIME].rlim_cur += - USEC_PER_SEC; - } - __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); - } - } } /* diff --git a/trunk/kernel/printk.c b/trunk/kernel/printk.c index 423a8c765a57..89011bf8c106 100644 --- a/trunk/kernel/printk.c +++ b/trunk/kernel/printk.c @@ -573,6 +573,11 @@ static int __init printk_time_setup(char *str) __setup("time", printk_time_setup); +__attribute__((weak)) unsigned long long printk_clock(void) +{ + return sched_clock(); +} + /* Check if we have any console registered that can be called early in boot. */ static int have_callable_console(void) { @@ -623,57 +628,30 @@ asmlinkage int printk(const char *fmt, ...) /* cpu currently holding logbuf_lock */ static volatile unsigned int printk_cpu = UINT_MAX; -const char printk_recursion_bug_msg [] = - KERN_CRIT "BUG: recent printk recursion!\n"; -static int printk_recursion_bug; - asmlinkage int vprintk(const char *fmt, va_list args) { - static int log_level_unknown = 1; - static char printk_buf[1024]; - unsigned long flags; - int printed_len = 0; - int this_cpu; + int printed_len; char *p; + static char printk_buf[1024]; + static int log_level_unknown = 1; boot_delay_msec(); preempt_disable(); - /* This stops the holder of console_sem just where we want him */ - raw_local_irq_save(flags); - this_cpu = smp_processor_id(); - - /* - * Ouch, printk recursed into itself! - */ - if (unlikely(printk_cpu == this_cpu)) { - /* - * If a crash is occurring during printk() on this CPU, - * then try to get the crash message out but make sure - * we can't deadlock. Otherwise just return to avoid the - * recursion and return - but flag the recursion so that - * it can be printed at the next appropriate moment: - */ - if (!oops_in_progress) { - printk_recursion_bug = 1; - goto out_restore_irqs; - } + if (unlikely(oops_in_progress) && printk_cpu == smp_processor_id()) + /* If a crash is occurring during printk() on this CPU, + * make sure we can't deadlock */ zap_locks(); - } + /* This stops the holder of console_sem just where we want him */ + raw_local_irq_save(flags); lockdep_off(); spin_lock(&logbuf_lock); - printk_cpu = this_cpu; + printk_cpu = smp_processor_id(); - if (printk_recursion_bug) { - printk_recursion_bug = 0; - strcpy(printk_buf, printk_recursion_bug_msg); - printed_len = sizeof(printk_recursion_bug_msg); - } /* Emit the output into the temporary buffer */ - printed_len += vscnprintf(printk_buf + printed_len, - sizeof(printk_buf), fmt, args); + printed_len = vscnprintf(printk_buf, sizeof(printk_buf), fmt, args); /* * Copy the output into log_buf. 
If the caller didn't provide @@ -702,9 +680,7 @@ asmlinkage int vprintk(const char *fmt, va_list args) loglev_char = default_message_loglevel + '0'; } - t = 0; - if (system_state != SYSTEM_BOOTING) - t = ktime_to_ns(ktime_get()); + t = printk_clock(); nanosec_rem = do_div(t, 1000000000); tlen = sprintf(tbuf, "<%c>[%5lu.%06lu] ", @@ -768,7 +744,6 @@ asmlinkage int vprintk(const char *fmt, va_list args) printk_cpu = UINT_MAX; spin_unlock(&logbuf_lock); lockdep_on(); -out_restore_irqs: raw_local_irq_restore(flags); } diff --git a/trunk/kernel/profile.c b/trunk/kernel/profile.c index e64c2da11c0f..5e95330e5120 100644 --- a/trunk/kernel/profile.c +++ b/trunk/kernel/profile.c @@ -52,7 +52,7 @@ static DEFINE_PER_CPU(int, cpu_profile_flip); static DEFINE_MUTEX(profile_flip_mutex); #endif /* CONFIG_SMP */ -static int __init profile_setup(char *str) +static int __init profile_setup(char * str) { static char __initdata schedstr[] = "schedule"; static char __initdata sleepstr[] = "sleep"; @@ -104,28 +104,28 @@ __setup("profile=", profile_setup); void __init profile_init(void) { - if (!prof_on) + if (!prof_on) return; - + /* only text is profiled */ prof_len = (_etext - _stext) >> prof_shift; prof_buffer = alloc_bootmem(prof_len*sizeof(atomic_t)); } /* Profile event notifications */ - + #ifdef CONFIG_PROFILING - + static BLOCKING_NOTIFIER_HEAD(task_exit_notifier); static ATOMIC_NOTIFIER_HEAD(task_free_notifier); static BLOCKING_NOTIFIER_HEAD(munmap_notifier); - -void profile_task_exit(struct task_struct *task) + +void profile_task_exit(struct task_struct * task) { blocking_notifier_call_chain(&task_exit_notifier, 0, task); } - -int profile_handoff_task(struct task_struct *task) + +int profile_handoff_task(struct task_struct * task) { int ret; ret = atomic_notifier_call_chain(&task_free_notifier, 0, task); @@ -137,55 +137,52 @@ void profile_munmap(unsigned long addr) blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr); } -int task_handoff_register(struct notifier_block *n) +int task_handoff_register(struct notifier_block * n) { return atomic_notifier_chain_register(&task_free_notifier, n); } -EXPORT_SYMBOL_GPL(task_handoff_register); -int task_handoff_unregister(struct notifier_block *n) +int task_handoff_unregister(struct notifier_block * n) { return atomic_notifier_chain_unregister(&task_free_notifier, n); } -EXPORT_SYMBOL_GPL(task_handoff_unregister); -int profile_event_register(enum profile_type type, struct notifier_block *n) +int profile_event_register(enum profile_type type, struct notifier_block * n) { int err = -EINVAL; - + switch (type) { - case PROFILE_TASK_EXIT: - err = blocking_notifier_chain_register( - &task_exit_notifier, n); - break; - case PROFILE_MUNMAP: - err = blocking_notifier_chain_register( - &munmap_notifier, n); - break; + case PROFILE_TASK_EXIT: + err = blocking_notifier_chain_register( + &task_exit_notifier, n); + break; + case PROFILE_MUNMAP: + err = blocking_notifier_chain_register( + &munmap_notifier, n); + break; } - + return err; } -EXPORT_SYMBOL_GPL(profile_event_register); -int profile_event_unregister(enum profile_type type, struct notifier_block *n) + +int profile_event_unregister(enum profile_type type, struct notifier_block * n) { int err = -EINVAL; - + switch (type) { - case PROFILE_TASK_EXIT: - err = blocking_notifier_chain_unregister( - &task_exit_notifier, n); - break; - case PROFILE_MUNMAP: - err = blocking_notifier_chain_unregister( - &munmap_notifier, n); - break; + case PROFILE_TASK_EXIT: + err = blocking_notifier_chain_unregister( + 
&task_exit_notifier, n); + break; + case PROFILE_MUNMAP: + err = blocking_notifier_chain_unregister( + &munmap_notifier, n); + break; } return err; } -EXPORT_SYMBOL_GPL(profile_event_unregister); int register_timer_hook(int (*hook)(struct pt_regs *)) { @@ -194,7 +191,6 @@ int register_timer_hook(int (*hook)(struct pt_regs *)) timer_hook = hook; return 0; } -EXPORT_SYMBOL_GPL(register_timer_hook); void unregister_timer_hook(int (*hook)(struct pt_regs *)) { @@ -203,7 +199,13 @@ void unregister_timer_hook(int (*hook)(struct pt_regs *)) /* make sure all CPUs see the NULL hook */ synchronize_sched(); /* Allow ongoing interrupts to complete. */ } + +EXPORT_SYMBOL_GPL(register_timer_hook); EXPORT_SYMBOL_GPL(unregister_timer_hook); +EXPORT_SYMBOL_GPL(task_handoff_register); +EXPORT_SYMBOL_GPL(task_handoff_unregister); +EXPORT_SYMBOL_GPL(profile_event_register); +EXPORT_SYMBOL_GPL(profile_event_unregister); #endif /* CONFIG_PROFILING */ @@ -364,7 +366,7 @@ static int __devinit profile_cpu_callback(struct notifier_block *info, per_cpu(cpu_profile_hits, cpu)[0] = page_address(page); } break; -out_free: + out_free: page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]); per_cpu(cpu_profile_hits, cpu)[1] = NULL; __free_page(page); @@ -407,6 +409,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits) atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); } #endif /* !CONFIG_SMP */ + EXPORT_SYMBOL_GPL(profile_hits); void profile_tick(int type) @@ -424,7 +427,7 @@ void profile_tick(int type) #include #include -static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, +static int prof_cpu_mask_read_proc (char *page, char **start, off_t off, int count, int *eof, void *data) { int len = cpumask_scnprintf(page, count, *(cpumask_t *)data); @@ -434,8 +437,8 @@ static int prof_cpu_mask_read_proc(char *page, char **start, off_t off, return len; } -static int prof_cpu_mask_write_proc(struct file *file, - const char __user *buffer, unsigned long count, void *data) +static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffer, + unsigned long count, void *data) { cpumask_t *mask = (cpumask_t *)data; unsigned long full_count = count, err; @@ -454,8 +457,7 @@ void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir) struct proc_dir_entry *entry; /* create /proc/irq/prof_cpu_mask */ - entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir); - if (!entry) + if (!(entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir))) return; entry->data = (void *)&prof_cpu_mask; entry->read_proc = prof_cpu_mask_read_proc; @@ -473,7 +475,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; ssize_t read; - char *pnt; + char * pnt; unsigned int sample_step = 1 << prof_shift; profile_flip_buffers(); @@ -484,12 +486,12 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos) read = 0; while (p < sizeof(unsigned int) && count > 0) { - if (put_user(*((char *)(&sample_step)+p), buf)) + if (put_user(*((char *)(&sample_step)+p),buf)) return -EFAULT; buf++; p++; count--; read++; } pnt = (char *)prof_buffer + p - sizeof(atomic_t); - if (copy_to_user(buf, (void *)pnt, count)) + if (copy_to_user(buf,(void *)pnt,count)) return -EFAULT; read += count; *ppos += read; @@ -506,7 +508,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { #ifdef CONFIG_SMP - extern int setup_profiling_timer(unsigned int multiplier); + extern int setup_profiling_timer 
(unsigned int multiplier); if (count == sizeof(int)) { unsigned int multiplier; @@ -589,8 +591,7 @@ static int __init create_proc_profile(void) return 0; if (create_hash_tables()) return -1; - entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL); - if (!entry) + if (!(entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL))) return 0; entry->proc_fops = &proc_profile_operations; entry->size = (1+prof_len) * sizeof(atomic_t); diff --git a/trunk/kernel/rcuclassic.c b/trunk/kernel/rcuclassic.c deleted file mode 100644 index f4ffbd0f306f..000000000000 --- a/trunk/kernel/rcuclassic.c +++ /dev/null @@ -1,575 +0,0 @@ -/* - * Read-Copy Update mechanism for mutual exclusion - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright IBM Corporation, 2001 - * - * Authors: Dipankar Sarma - * Manfred Spraul - * - * Based on the original work by Paul McKenney - * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. - * Papers: - * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf - * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) - * - * For detailed explanation of Read-Copy Update mechanism see - - * Documentation/RCU - * - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -static struct lock_class_key rcu_lock_key; -struct lockdep_map rcu_lock_map = - STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key); -EXPORT_SYMBOL_GPL(rcu_lock_map); -#endif - - -/* Definition for rcupdate control block. */ -static struct rcu_ctrlblk rcu_ctrlblk = { - .cur = -300, - .completed = -300, - .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), - .cpumask = CPU_MASK_NONE, -}; -static struct rcu_ctrlblk rcu_bh_ctrlblk = { - .cur = -300, - .completed = -300, - .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), - .cpumask = CPU_MASK_NONE, -}; - -DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L }; -DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L }; - -static int blimit = 10; -static int qhimark = 10000; -static int qlowmark = 100; - -#ifdef CONFIG_SMP -static void force_quiescent_state(struct rcu_data *rdp, - struct rcu_ctrlblk *rcp) -{ - int cpu; - cpumask_t cpumask; - set_need_resched(); - if (unlikely(!rcp->signaled)) { - rcp->signaled = 1; - /* - * Don't send IPI to itself. With irqs disabled, - * rdp->cpu is the current cpu. - */ - cpumask = rcp->cpumask; - cpu_clear(rdp->cpu, cpumask); - for_each_cpu_mask(cpu, cpumask) - smp_send_reschedule(cpu); - } -} -#else -static inline void force_quiescent_state(struct rcu_data *rdp, - struct rcu_ctrlblk *rcp) -{ - set_need_resched(); -} -#endif - -/** - * call_rcu - Queue an RCU callback for invocation after a grace period. 
- * @head: structure to be used for queueing the RCU updates. - * @func: actual update function to be invoked after the grace period - * - * The update function will be invoked some time after a full grace - * period elapses, in other words after all currently executing RCU - * read-side critical sections have completed. RCU read-side critical - * sections are delimited by rcu_read_lock() and rcu_read_unlock(), - * and may be nested. - */ -void call_rcu(struct rcu_head *head, - void (*func)(struct rcu_head *rcu)) -{ - unsigned long flags; - struct rcu_data *rdp; - - head->func = func; - head->next = NULL; - local_irq_save(flags); - rdp = &__get_cpu_var(rcu_data); - *rdp->nxttail = head; - rdp->nxttail = &head->next; - if (unlikely(++rdp->qlen > qhimark)) { - rdp->blimit = INT_MAX; - force_quiescent_state(rdp, &rcu_ctrlblk); - } - local_irq_restore(flags); -} -EXPORT_SYMBOL_GPL(call_rcu); - -/** - * call_rcu_bh - Queue an RCU for invocation after a quicker grace period. - * @head: structure to be used for queueing the RCU updates. - * @func: actual update function to be invoked after the grace period - * - * The update function will be invoked some time after a full grace - * period elapses, in other words after all currently executing RCU - * read-side critical sections have completed. call_rcu_bh() assumes - * that the read-side critical sections end on completion of a softirq - * handler. This means that read-side critical sections in process - * context must not be interrupted by softirqs. This interface is to be - * used when most of the read-side critical sections are in softirq context. - * RCU read-side critical sections are delimited by rcu_read_lock() and - * rcu_read_unlock(), * if in interrupt context or rcu_read_lock_bh() - * and rcu_read_unlock_bh(), if in process context. These may be nested. - */ -void call_rcu_bh(struct rcu_head *head, - void (*func)(struct rcu_head *rcu)) -{ - unsigned long flags; - struct rcu_data *rdp; - - head->func = func; - head->next = NULL; - local_irq_save(flags); - rdp = &__get_cpu_var(rcu_bh_data); - *rdp->nxttail = head; - rdp->nxttail = &head->next; - - if (unlikely(++rdp->qlen > qhimark)) { - rdp->blimit = INT_MAX; - force_quiescent_state(rdp, &rcu_bh_ctrlblk); - } - - local_irq_restore(flags); -} -EXPORT_SYMBOL_GPL(call_rcu_bh); - -/* - * Return the number of RCU batches processed thus far. Useful - * for debug and statistics. - */ -long rcu_batches_completed(void) -{ - return rcu_ctrlblk.completed; -} -EXPORT_SYMBOL_GPL(rcu_batches_completed); - -/* - * Return the number of RCU batches processed thus far. Useful - * for debug and statistics. - */ -long rcu_batches_completed_bh(void) -{ - return rcu_bh_ctrlblk.completed; -} -EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); - -/* Raises the softirq for processing rcu_callbacks. */ -static inline void raise_rcu_softirq(void) -{ - raise_softirq(RCU_SOFTIRQ); - /* - * The smp_mb() here is required to ensure that this cpu's - * __rcu_process_callbacks() reads the most recently updated - * value of rcu->cur. - */ - smp_mb(); -} - -/* - * Invoke the completed RCU callbacks. They are expected to be in - * a per-cpu list. 
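(Illustrative aside, not part of the patch text: the call_rcu() interface documented above is normally used by an updater that unlinks an element under its own lock and defers the kfree() until a grace period has elapsed. A minimal sketch, in which struct foo, foo_lock and foo_remove() are assumed purely for illustration:)

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	struct list_head list;
	int data;
	struct rcu_head rcu;
};

static DEFINE_SPINLOCK(foo_lock);	/* serializes updaters only */

static void foo_reclaim(struct rcu_head *head)
{
	/* Invoked once all readers that might still see the element are done. */
	kfree(container_of(head, struct foo, rcu));
}

static void foo_remove(struct foo *fp)
{
	spin_lock(&foo_lock);
	list_del_rcu(&fp->list);		/* unlink; readers may still hold it */
	spin_unlock(&foo_lock);
	call_rcu(&fp->rcu, foo_reclaim);	/* free only after a grace period */
}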
- */ -static void rcu_do_batch(struct rcu_data *rdp) -{ - struct rcu_head *next, *list; - int count = 0; - - list = rdp->donelist; - while (list) { - next = list->next; - prefetch(next); - list->func(list); - list = next; - if (++count >= rdp->blimit) - break; - } - rdp->donelist = list; - - local_irq_disable(); - rdp->qlen -= count; - local_irq_enable(); - if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark) - rdp->blimit = blimit; - - if (!rdp->donelist) - rdp->donetail = &rdp->donelist; - else - raise_rcu_softirq(); -} - -/* - * Grace period handling: - * The grace period handling consists out of two steps: - * - A new grace period is started. - * This is done by rcu_start_batch. The start is not broadcasted to - * all cpus, they must pick this up by comparing rcp->cur with - * rdp->quiescbatch. All cpus are recorded in the - * rcu_ctrlblk.cpumask bitmap. - * - All cpus must go through a quiescent state. - * Since the start of the grace period is not broadcasted, at least two - * calls to rcu_check_quiescent_state are required: - * The first call just notices that a new grace period is running. The - * following calls check if there was a quiescent state since the beginning - * of the grace period. If so, it updates rcu_ctrlblk.cpumask. If - * the bitmap is empty, then the grace period is completed. - * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace - * period (if necessary). - */ -/* - * Register a new batch of callbacks, and start it up if there is currently no - * active batch and the batch to be registered has not already occurred. - * Caller must hold rcu_ctrlblk.lock. - */ -static void rcu_start_batch(struct rcu_ctrlblk *rcp) -{ - if (rcp->next_pending && - rcp->completed == rcp->cur) { - rcp->next_pending = 0; - /* - * next_pending == 0 must be visible in - * __rcu_process_callbacks() before it can see new value of cur. - */ - smp_wmb(); - rcp->cur++; - - /* - * Accessing nohz_cpu_mask before incrementing rcp->cur needs a - * Barrier Otherwise it can cause tickless idle CPUs to be - * included in rcp->cpumask, which will extend graceperiods - * unnecessarily. - */ - smp_mb(); - cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask); - - rcp->signaled = 0; - } -} - -/* - * cpu went through a quiescent state since the beginning of the grace period. - * Clear it from the cpu mask and complete the grace period if it was the last - * cpu. Start another grace period if someone has further entries pending - */ -static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp) -{ - cpu_clear(cpu, rcp->cpumask); - if (cpus_empty(rcp->cpumask)) { - /* batch completed ! */ - rcp->completed = rcp->cur; - rcu_start_batch(rcp); - } -} - -/* - * Check if the cpu has gone through a quiescent state (say context - * switch). If so and if it already hasn't done so in this RCU - * quiescent cycle, then indicate that it has done so. - */ -static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, - struct rcu_data *rdp) -{ - if (rdp->quiescbatch != rcp->cur) { - /* start new grace period: */ - rdp->qs_pending = 1; - rdp->passed_quiesc = 0; - rdp->quiescbatch = rcp->cur; - return; - } - - /* Grace period already completed for this cpu? - * qs_pending is checked instead of the actual bitmap to avoid - * cacheline trashing. - */ - if (!rdp->qs_pending) - return; - - /* - * Was there a quiescent state since the beginning of the grace - * period? If no, then exit and wait for the next call. 
- */ - if (!rdp->passed_quiesc) - return; - rdp->qs_pending = 0; - - spin_lock(&rcp->lock); - /* - * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync - * during cpu startup. Ignore the quiescent state. - */ - if (likely(rdp->quiescbatch == rcp->cur)) - cpu_quiet(rdp->cpu, rcp); - - spin_unlock(&rcp->lock); -} - - -#ifdef CONFIG_HOTPLUG_CPU - -/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing - * locking requirements, the list it's pulling from has to belong to a cpu - * which is dead and hence not processing interrupts. - */ -static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list, - struct rcu_head **tail) -{ - local_irq_disable(); - *this_rdp->nxttail = list; - if (list) - this_rdp->nxttail = tail; - local_irq_enable(); -} - -static void __rcu_offline_cpu(struct rcu_data *this_rdp, - struct rcu_ctrlblk *rcp, struct rcu_data *rdp) -{ - /* if the cpu going offline owns the grace period - * we can block indefinitely waiting for it, so flush - * it here - */ - spin_lock_bh(&rcp->lock); - if (rcp->cur != rcp->completed) - cpu_quiet(rdp->cpu, rcp); - spin_unlock_bh(&rcp->lock); - rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail); - rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail); - rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail); -} - -static void rcu_offline_cpu(int cpu) -{ - struct rcu_data *this_rdp = &get_cpu_var(rcu_data); - struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data); - - __rcu_offline_cpu(this_rdp, &rcu_ctrlblk, - &per_cpu(rcu_data, cpu)); - __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk, - &per_cpu(rcu_bh_data, cpu)); - put_cpu_var(rcu_data); - put_cpu_var(rcu_bh_data); -} - -#else - -static void rcu_offline_cpu(int cpu) -{ -} - -#endif - -/* - * This does the RCU processing work from softirq context. - */ -static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, - struct rcu_data *rdp) -{ - if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) { - *rdp->donetail = rdp->curlist; - rdp->donetail = rdp->curtail; - rdp->curlist = NULL; - rdp->curtail = &rdp->curlist; - } - - if (rdp->nxtlist && !rdp->curlist) { - local_irq_disable(); - rdp->curlist = rdp->nxtlist; - rdp->curtail = rdp->nxttail; - rdp->nxtlist = NULL; - rdp->nxttail = &rdp->nxtlist; - local_irq_enable(); - - /* - * start the next batch of callbacks - */ - - /* determine batch number */ - rdp->batch = rcp->cur + 1; - /* see the comment and corresponding wmb() in - * the rcu_start_batch() - */ - smp_rmb(); - - if (!rcp->next_pending) { - /* and start it/schedule start if it's a new batch */ - spin_lock(&rcp->lock); - rcp->next_pending = 1; - rcu_start_batch(rcp); - spin_unlock(&rcp->lock); - } - } - - rcu_check_quiescent_state(rcp, rdp); - if (rdp->donelist) - rcu_do_batch(rdp); -} - -static void rcu_process_callbacks(struct softirq_action *unused) -{ - __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data)); - __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data)); -} - -static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) -{ - /* This cpu has pending rcu entries and the grace period - * for them has completed. 
- */ - if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) - return 1; - - /* This cpu has no pending entries, but there are new entries */ - if (!rdp->curlist && rdp->nxtlist) - return 1; - - /* This cpu has finished callbacks to invoke */ - if (rdp->donelist) - return 1; - - /* The rcu core waits for a quiescent state from the cpu */ - if (rdp->quiescbatch != rcp->cur || rdp->qs_pending) - return 1; - - /* nothing to do */ - return 0; -} - -/* - * Check to see if there is any immediate RCU-related work to be done - * by the current CPU, returning 1 if so. This function is part of the - * RCU implementation; it is -not- an exported member of the RCU API. - */ -int rcu_pending(int cpu) -{ - return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) || - __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu)); -} - -/* - * Check to see if any future RCU-related work will need to be done - * by the current CPU, even if none need be done immediately, returning - * 1 if so. This function is part of the RCU implementation; it is -not- - * an exported member of the RCU API. - */ -int rcu_needs_cpu(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_data, cpu); - struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu); - - return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu)); -} - -void rcu_check_callbacks(int cpu, int user) -{ - if (user || - (idle_cpu(cpu) && !in_softirq() && - hardirq_count() <= (1 << HARDIRQ_SHIFT))) { - rcu_qsctr_inc(cpu); - rcu_bh_qsctr_inc(cpu); - } else if (!in_softirq()) - rcu_bh_qsctr_inc(cpu); - raise_rcu_softirq(); -} - -static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, - struct rcu_data *rdp) -{ - memset(rdp, 0, sizeof(*rdp)); - rdp->curtail = &rdp->curlist; - rdp->nxttail = &rdp->nxtlist; - rdp->donetail = &rdp->donelist; - rdp->quiescbatch = rcp->completed; - rdp->qs_pending = 0; - rdp->cpu = cpu; - rdp->blimit = blimit; -} - -static void __cpuinit rcu_online_cpu(int cpu) -{ - struct rcu_data *rdp = &per_cpu(rcu_data, cpu); - struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu); - - rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp); - rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp); - open_softirq(RCU_SOFTIRQ, rcu_process_callbacks, NULL); -} - -static int __cpuinit rcu_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - long cpu = (long)hcpu; - - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - rcu_online_cpu(cpu); - break; - case CPU_DEAD: - case CPU_DEAD_FROZEN: - rcu_offline_cpu(cpu); - break; - default: - break; - } - return NOTIFY_OK; -} - -static struct notifier_block __cpuinitdata rcu_nb = { - .notifier_call = rcu_cpu_notify, -}; - -/* - * Initializes rcu mechanism. Assumed to be called early. - * That is before local timer(SMP) or jiffie timer (uniproc) is setup. - * Note that rcu_qsctr and friends are implicitly - * initialized due to the choice of ``0'' for RCU_CTR_INVALID. 
- */ -void __init __rcu_init(void) -{ - rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, - (void *)(long)smp_processor_id()); - /* Register notifier for non-boot CPUs */ - register_cpu_notifier(&rcu_nb); -} - -module_param(blimit, int, 0); -module_param(qhimark, int, 0); -module_param(qlowmark, int, 0); diff --git a/trunk/kernel/rcupdate.c b/trunk/kernel/rcupdate.c index 760dfc233a00..f2c1a04e9b18 100644 --- a/trunk/kernel/rcupdate.c +++ b/trunk/kernel/rcupdate.c @@ -15,7 +15,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * - * Copyright IBM Corporation, 2001 + * Copyright (C) IBM Corporation, 2001 * * Authors: Dipankar Sarma * Manfred Spraul @@ -35,57 +35,165 @@ #include #include #include +#include #include #include #include #include +#include #include +#include #include #include #include #include -#include -struct rcu_synchronize { - struct rcu_head head; - struct completion completion; +#ifdef CONFIG_DEBUG_LOCK_ALLOC +static struct lock_class_key rcu_lock_key; +struct lockdep_map rcu_lock_map = + STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key); + +EXPORT_SYMBOL_GPL(rcu_lock_map); +#endif + +/* Definition for rcupdate control block. */ +static struct rcu_ctrlblk rcu_ctrlblk = { + .cur = -300, + .completed = -300, + .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), + .cpumask = CPU_MASK_NONE, +}; +static struct rcu_ctrlblk rcu_bh_ctrlblk = { + .cur = -300, + .completed = -300, + .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), + .cpumask = CPU_MASK_NONE, }; -static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; +DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L }; +DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L }; + +/* Fake initialization required by compiler */ +static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL}; +static int blimit = 10; +static int qhimark = 10000; +static int qlowmark = 100; + static atomic_t rcu_barrier_cpu_count; static DEFINE_MUTEX(rcu_barrier_mutex); static struct completion rcu_barrier_completion; -/* Because of FASTCALL declaration of complete, we use this wrapper */ -static void wakeme_after_rcu(struct rcu_head *head) +#ifdef CONFIG_SMP +static void force_quiescent_state(struct rcu_data *rdp, + struct rcu_ctrlblk *rcp) { - struct rcu_synchronize *rcu; - - rcu = container_of(head, struct rcu_synchronize, head); - complete(&rcu->completion); + int cpu; + cpumask_t cpumask; + set_need_resched(); + if (unlikely(!rcp->signaled)) { + rcp->signaled = 1; + /* + * Don't send IPI to itself. With irqs disabled, + * rdp->cpu is the current cpu. + */ + cpumask = rcp->cpumask; + cpu_clear(rdp->cpu, cpumask); + for_each_cpu_mask(cpu, cpumask) + smp_send_reschedule(cpu); + } +} +#else +static inline void force_quiescent_state(struct rcu_data *rdp, + struct rcu_ctrlblk *rcp) +{ + set_need_resched(); } +#endif /** - * synchronize_rcu - wait until a grace period has elapsed. + * call_rcu - Queue an RCU callback for invocation after a grace period. + * @head: structure to be used for queueing the RCU updates. + * @func: actual update function to be invoked after the grace period * - * Control will return to the caller some time after a full grace - * period has elapsed, in other words after all currently executing RCU + * The update function will be invoked some time after a full grace + * period elapses, in other words after all currently executing RCU * read-side critical sections have completed. 
RCU read-side critical * sections are delimited by rcu_read_lock() and rcu_read_unlock(), * and may be nested. */ -void synchronize_rcu(void) +void fastcall call_rcu(struct rcu_head *head, + void (*func)(struct rcu_head *rcu)) { - struct rcu_synchronize rcu; + unsigned long flags; + struct rcu_data *rdp; - init_completion(&rcu.completion); - /* Will wake me after RCU finished */ - call_rcu(&rcu.head, wakeme_after_rcu); + head->func = func; + head->next = NULL; + local_irq_save(flags); + rdp = &__get_cpu_var(rcu_data); + *rdp->nxttail = head; + rdp->nxttail = &head->next; + if (unlikely(++rdp->qlen > qhimark)) { + rdp->blimit = INT_MAX; + force_quiescent_state(rdp, &rcu_ctrlblk); + } + local_irq_restore(flags); +} - /* Wait for it */ - wait_for_completion(&rcu.completion); +/** + * call_rcu_bh - Queue an RCU for invocation after a quicker grace period. + * @head: structure to be used for queueing the RCU updates. + * @func: actual update function to be invoked after the grace period + * + * The update function will be invoked some time after a full grace + * period elapses, in other words after all currently executing RCU + * read-side critical sections have completed. call_rcu_bh() assumes + * that the read-side critical sections end on completion of a softirq + * handler. This means that read-side critical sections in process + * context must not be interrupted by softirqs. This interface is to be + * used when most of the read-side critical sections are in softirq context. + * RCU read-side critical sections are delimited by rcu_read_lock() and + * rcu_read_unlock(), * if in interrupt context or rcu_read_lock_bh() + * and rcu_read_unlock_bh(), if in process context. These may be nested. + */ +void fastcall call_rcu_bh(struct rcu_head *head, + void (*func)(struct rcu_head *rcu)) +{ + unsigned long flags; + struct rcu_data *rdp; + + head->func = func; + head->next = NULL; + local_irq_save(flags); + rdp = &__get_cpu_var(rcu_bh_data); + *rdp->nxttail = head; + rdp->nxttail = &head->next; + + if (unlikely(++rdp->qlen > qhimark)) { + rdp->blimit = INT_MAX; + force_quiescent_state(rdp, &rcu_bh_ctrlblk); + } + + local_irq_restore(flags); +} + +/* + * Return the number of RCU batches processed thus far. Useful + * for debug and statistics. + */ +long rcu_batches_completed(void) +{ + return rcu_ctrlblk.completed; +} + +/* + * Return the number of RCU batches processed thus far. Useful + * for debug and statistics. + */ +long rcu_batches_completed_bh(void) +{ + return rcu_bh_ctrlblk.completed; } -EXPORT_SYMBOL_GPL(synchronize_rcu); static void rcu_barrier_callback(struct rcu_head *notused) { @@ -99,8 +207,10 @@ static void rcu_barrier_callback(struct rcu_head *notused) static void rcu_barrier_func(void *notused) { int cpu = smp_processor_id(); - struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu); + struct rcu_data *rdp = &per_cpu(rcu_data, cpu); + struct rcu_head *head; + head = &rdp->barrier; atomic_inc(&rcu_barrier_cpu_count); call_rcu(head, rcu_barrier_callback); } @@ -115,24 +225,420 @@ void rcu_barrier(void) mutex_lock(&rcu_barrier_mutex); init_completion(&rcu_barrier_completion); atomic_set(&rcu_barrier_cpu_count, 0); - /* - * The queueing of callbacks in all CPUs must be atomic with - * respect to RCU, otherwise one CPU may queue a callback, - * wait for a grace period, decrement barrier count and call - * complete(), while other CPUs have not yet queued anything. - * So, we need to make sure that grace periods cannot complete - * until all the callbacks are queued. 
- */ - rcu_read_lock(); on_each_cpu(rcu_barrier_func, NULL, 0, 1); - rcu_read_unlock(); wait_for_completion(&rcu_barrier_completion); mutex_unlock(&rcu_barrier_mutex); } EXPORT_SYMBOL_GPL(rcu_barrier); +/* + * Invoke the completed RCU callbacks. They are expected to be in + * a per-cpu list. + */ +static void rcu_do_batch(struct rcu_data *rdp) +{ + struct rcu_head *next, *list; + int count = 0; + + list = rdp->donelist; + while (list) { + next = list->next; + prefetch(next); + list->func(list); + list = next; + if (++count >= rdp->blimit) + break; + } + rdp->donelist = list; + + local_irq_disable(); + rdp->qlen -= count; + local_irq_enable(); + if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark) + rdp->blimit = blimit; + + if (!rdp->donelist) + rdp->donetail = &rdp->donelist; + else + tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu)); +} + +/* + * Grace period handling: + * The grace period handling consists out of two steps: + * - A new grace period is started. + * This is done by rcu_start_batch. The start is not broadcasted to + * all cpus, they must pick this up by comparing rcp->cur with + * rdp->quiescbatch. All cpus are recorded in the + * rcu_ctrlblk.cpumask bitmap. + * - All cpus must go through a quiescent state. + * Since the start of the grace period is not broadcasted, at least two + * calls to rcu_check_quiescent_state are required: + * The first call just notices that a new grace period is running. The + * following calls check if there was a quiescent state since the beginning + * of the grace period. If so, it updates rcu_ctrlblk.cpumask. If + * the bitmap is empty, then the grace period is completed. + * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace + * period (if necessary). + */ +/* + * Register a new batch of callbacks, and start it up if there is currently no + * active batch and the batch to be registered has not already occurred. + * Caller must hold rcu_ctrlblk.lock. + */ +static void rcu_start_batch(struct rcu_ctrlblk *rcp) +{ + if (rcp->next_pending && + rcp->completed == rcp->cur) { + rcp->next_pending = 0; + /* + * next_pending == 0 must be visible in + * __rcu_process_callbacks() before it can see new value of cur. + */ + smp_wmb(); + rcp->cur++; + + /* + * Accessing nohz_cpu_mask before incrementing rcp->cur needs a + * Barrier Otherwise it can cause tickless idle CPUs to be + * included in rcp->cpumask, which will extend graceperiods + * unnecessarily. + */ + smp_mb(); + cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask); + + rcp->signaled = 0; + } +} + +/* + * cpu went through a quiescent state since the beginning of the grace period. + * Clear it from the cpu mask and complete the grace period if it was the last + * cpu. Start another grace period if someone has further entries pending + */ +static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp) +{ + cpu_clear(cpu, rcp->cpumask); + if (cpus_empty(rcp->cpumask)) { + /* batch completed ! */ + rcp->completed = rcp->cur; + rcu_start_batch(rcp); + } +} + +/* + * Check if the cpu has gone through a quiescent state (say context + * switch). If so and if it already hasn't done so in this RCU + * quiescent cycle, then indicate that it has done so. + */ +static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, + struct rcu_data *rdp) +{ + if (rdp->quiescbatch != rcp->cur) { + /* start new grace period: */ + rdp->qs_pending = 1; + rdp->passed_quiesc = 0; + rdp->quiescbatch = rcp->cur; + return; + } + + /* Grace period already completed for this cpu? 
+ * qs_pending is checked instead of the actual bitmap to avoid + * cacheline trashing. + */ + if (!rdp->qs_pending) + return; + + /* + * Was there a quiescent state since the beginning of the grace + * period? If no, then exit and wait for the next call. + */ + if (!rdp->passed_quiesc) + return; + rdp->qs_pending = 0; + + spin_lock(&rcp->lock); + /* + * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync + * during cpu startup. Ignore the quiescent state. + */ + if (likely(rdp->quiescbatch == rcp->cur)) + cpu_quiet(rdp->cpu, rcp); + + spin_unlock(&rcp->lock); +} + + +#ifdef CONFIG_HOTPLUG_CPU + +/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing + * locking requirements, the list it's pulling from has to belong to a cpu + * which is dead and hence not processing interrupts. + */ +static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list, + struct rcu_head **tail) +{ + local_irq_disable(); + *this_rdp->nxttail = list; + if (list) + this_rdp->nxttail = tail; + local_irq_enable(); +} + +static void __rcu_offline_cpu(struct rcu_data *this_rdp, + struct rcu_ctrlblk *rcp, struct rcu_data *rdp) +{ + /* if the cpu going offline owns the grace period + * we can block indefinitely waiting for it, so flush + * it here + */ + spin_lock_bh(&rcp->lock); + if (rcp->cur != rcp->completed) + cpu_quiet(rdp->cpu, rcp); + spin_unlock_bh(&rcp->lock); + rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail); + rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail); + rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail); +} + +static void rcu_offline_cpu(int cpu) +{ + struct rcu_data *this_rdp = &get_cpu_var(rcu_data); + struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data); + + __rcu_offline_cpu(this_rdp, &rcu_ctrlblk, + &per_cpu(rcu_data, cpu)); + __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk, + &per_cpu(rcu_bh_data, cpu)); + put_cpu_var(rcu_data); + put_cpu_var(rcu_bh_data); + tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu); +} + +#else + +static void rcu_offline_cpu(int cpu) +{ +} + +#endif + +/* + * This does the RCU processing work from tasklet context. + */ +static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, + struct rcu_data *rdp) +{ + if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) { + *rdp->donetail = rdp->curlist; + rdp->donetail = rdp->curtail; + rdp->curlist = NULL; + rdp->curtail = &rdp->curlist; + } + + if (rdp->nxtlist && !rdp->curlist) { + local_irq_disable(); + rdp->curlist = rdp->nxtlist; + rdp->curtail = rdp->nxttail; + rdp->nxtlist = NULL; + rdp->nxttail = &rdp->nxtlist; + local_irq_enable(); + + /* + * start the next batch of callbacks + */ + + /* determine batch number */ + rdp->batch = rcp->cur + 1; + /* see the comment and corresponding wmb() in + * the rcu_start_batch() + */ + smp_rmb(); + + if (!rcp->next_pending) { + /* and start it/schedule start if it's a new batch */ + spin_lock(&rcp->lock); + rcp->next_pending = 1; + rcu_start_batch(rcp); + spin_unlock(&rcp->lock); + } + } + + rcu_check_quiescent_state(rcp, rdp); + if (rdp->donelist) + rcu_do_batch(rdp); +} + +static void rcu_process_callbacks(unsigned long unused) +{ + __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data)); + __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data)); +} + +static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) +{ + /* This cpu has pending rcu entries and the grace period + * for them has completed. 
+ */ + if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) + return 1; + + /* This cpu has no pending entries, but there are new entries */ + if (!rdp->curlist && rdp->nxtlist) + return 1; + + /* This cpu has finished callbacks to invoke */ + if (rdp->donelist) + return 1; + + /* The rcu core waits for a quiescent state from the cpu */ + if (rdp->quiescbatch != rcp->cur || rdp->qs_pending) + return 1; + + /* nothing to do */ + return 0; +} + +/* + * Check to see if there is any immediate RCU-related work to be done + * by the current CPU, returning 1 if so. This function is part of the + * RCU implementation; it is -not- an exported member of the RCU API. + */ +int rcu_pending(int cpu) +{ + return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) || + __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu)); +} + +/* + * Check to see if any future RCU-related work will need to be done + * by the current CPU, even if none need be done immediately, returning + * 1 if so. This function is part of the RCU implementation; it is -not- + * an exported member of the RCU API. + */ +int rcu_needs_cpu(int cpu) +{ + struct rcu_data *rdp = &per_cpu(rcu_data, cpu); + struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu); + + return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu)); +} + +void rcu_check_callbacks(int cpu, int user) +{ + if (user || + (idle_cpu(cpu) && !in_softirq() && + hardirq_count() <= (1 << HARDIRQ_SHIFT))) { + rcu_qsctr_inc(cpu); + rcu_bh_qsctr_inc(cpu); + } else if (!in_softirq()) + rcu_bh_qsctr_inc(cpu); + tasklet_schedule(&per_cpu(rcu_tasklet, cpu)); +} + +static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, + struct rcu_data *rdp) +{ + memset(rdp, 0, sizeof(*rdp)); + rdp->curtail = &rdp->curlist; + rdp->nxttail = &rdp->nxtlist; + rdp->donetail = &rdp->donelist; + rdp->quiescbatch = rcp->completed; + rdp->qs_pending = 0; + rdp->cpu = cpu; + rdp->blimit = blimit; +} + +static void __cpuinit rcu_online_cpu(int cpu) +{ + struct rcu_data *rdp = &per_cpu(rcu_data, cpu); + struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu); + + rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp); + rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp); + tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL); +} + +static int __cpuinit rcu_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + long cpu = (long)hcpu; + switch (action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + rcu_online_cpu(cpu); + break; + case CPU_DEAD: + case CPU_DEAD_FROZEN: + rcu_offline_cpu(cpu); + break; + default: + break; + } + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata rcu_nb = { + .notifier_call = rcu_cpu_notify, +}; + +/* + * Initializes rcu mechanism. Assumed to be called early. + * That is before local timer(SMP) or jiffie timer (uniproc) is setup. + * Note that rcu_qsctr and friends are implicitly + * initialized due to the choice of ``0'' for RCU_CTR_INVALID. 
+ */ void __init rcu_init(void) { - __rcu_init(); + rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, + (void *)(long)smp_processor_id()); + /* Register notifier for non-boot CPUs */ + register_cpu_notifier(&rcu_nb); +} + +struct rcu_synchronize { + struct rcu_head head; + struct completion completion; +}; + +/* Because of FASTCALL declaration of complete, we use this wrapper */ +static void wakeme_after_rcu(struct rcu_head *head) +{ + struct rcu_synchronize *rcu; + + rcu = container_of(head, struct rcu_synchronize, head); + complete(&rcu->completion); } +/** + * synchronize_rcu - wait until a grace period has elapsed. + * + * Control will return to the caller some time after a full grace + * period has elapsed, in other words after all currently executing RCU + * read-side critical sections have completed. RCU read-side critical + * sections are delimited by rcu_read_lock() and rcu_read_unlock(), + * and may be nested. + * + * If your read-side code is not protected by rcu_read_lock(), do -not- + * use synchronize_rcu(). + */ +void synchronize_rcu(void) +{ + struct rcu_synchronize rcu; + + init_completion(&rcu.completion); + /* Will wake me after RCU finished */ + call_rcu(&rcu.head, wakeme_after_rcu); + + /* Wait for it */ + wait_for_completion(&rcu.completion); +} + +module_param(blimit, int, 0); +module_param(qhimark, int, 0); +module_param(qlowmark, int, 0); +EXPORT_SYMBOL_GPL(rcu_batches_completed); +EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); +EXPORT_SYMBOL_GPL(call_rcu); +EXPORT_SYMBOL_GPL(call_rcu_bh); +EXPORT_SYMBOL_GPL(synchronize_rcu); diff --git a/trunk/kernel/rcupreempt.c b/trunk/kernel/rcupreempt.c deleted file mode 100644 index 987cfb7ade89..000000000000 --- a/trunk/kernel/rcupreempt.c +++ /dev/null @@ -1,953 +0,0 @@ -/* - * Read-Copy Update mechanism for mutual exclusion, realtime implementation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - * Copyright IBM Corporation, 2006 - * - * Authors: Paul E. McKenney - * With thanks to Esben Nielsen, Bill Huey, and Ingo Molnar - * for pushing me away from locks and towards counters, and - * to Suparna Bhattacharya for pushing me completely away - * from atomic instructions on the read side. - * - * Papers: http://www.rdrop.com/users/paulmck/RCU - * - * Design Document: http://lwn.net/Articles/253651/ - * - * For detailed explanation of Read-Copy Update mechanism see - - * Documentation/RCU/ *.txt - * - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * Macro that prevents the compiler from reordering accesses, but does - * absolutely -nothing- to prevent CPUs from reordering. 
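(Illustrative aside, not part of the patch text: where call_rcu() defers reclamation to a callback, the synchronize_rcu() reinstated in rcupdate.c above lets an updater simply block for a full grace period and then free the old version in line. A rough sketch of that pattern, with gp and gp_lock assumed purely for illustration:)

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int data;
};

static struct foo *gp;			/* hypothetical RCU-protected pointer */
static DEFINE_SPINLOCK(gp_lock);	/* serializes updaters only */

static int foo_update(int new_value)
{
	struct foo *newp, *oldp;

	newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	if (!newp)
		return -ENOMEM;

	spin_lock(&gp_lock);
	oldp = gp;
	if (oldp)
		*newp = *oldp;			/* copy the old version ... */
	newp->data = new_value;			/* ... update the copy ... */
	rcu_assign_pointer(gp, newp);		/* ... then publish it to readers */
	spin_unlock(&gp_lock);

	synchronize_rcu();			/* wait out pre-existing readers */
	kfree(oldp);				/* kfree(NULL) is a harmless no-op */
	return 0;
}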
This is used - * only to mediate communication between mainline code and hardware - * interrupt and NMI handlers. - */ -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) - -/* - * PREEMPT_RCU data structures. - */ - -/* - * GP_STAGES specifies the number of times the state machine has - * to go through the all the rcu_try_flip_states (see below) - * in a single Grace Period. - * - * GP in GP_STAGES stands for Grace Period ;) - */ -#define GP_STAGES 2 -struct rcu_data { - spinlock_t lock; /* Protect rcu_data fields. */ - long completed; /* Number of last completed batch. */ - int waitlistcount; - struct tasklet_struct rcu_tasklet; - struct rcu_head *nextlist; - struct rcu_head **nexttail; - struct rcu_head *waitlist[GP_STAGES]; - struct rcu_head **waittail[GP_STAGES]; - struct rcu_head *donelist; - struct rcu_head **donetail; - long rcu_flipctr[2]; -#ifdef CONFIG_RCU_TRACE - struct rcupreempt_trace trace; -#endif /* #ifdef CONFIG_RCU_TRACE */ -}; - -/* - * States for rcu_try_flip() and friends. - */ - -enum rcu_try_flip_states { - - /* - * Stay here if nothing is happening. Flip the counter if somthing - * starts happening. Denoted by "I" - */ - rcu_try_flip_idle_state, - - /* - * Wait here for all CPUs to notice that the counter has flipped. This - * prevents the old set of counters from ever being incremented once - * we leave this state, which in turn is necessary because we cannot - * test any individual counter for zero -- we can only check the sum. - * Denoted by "A". - */ - rcu_try_flip_waitack_state, - - /* - * Wait here for the sum of the old per-CPU counters to reach zero. - * Denoted by "Z". - */ - rcu_try_flip_waitzero_state, - - /* - * Wait here for each of the other CPUs to execute a memory barrier. - * This is necessary to ensure that these other CPUs really have - * completed executing their RCU read-side critical sections, despite - * their CPUs wildly reordering memory. Denoted by "M". - */ - rcu_try_flip_waitmb_state, -}; - -struct rcu_ctrlblk { - spinlock_t fliplock; /* Protect state-machine transitions. */ - long completed; /* Number of last completed batch. */ - enum rcu_try_flip_states rcu_try_flip_state; /* The current state of - the rcu state machine */ -}; - -static DEFINE_PER_CPU(struct rcu_data, rcu_data); -static struct rcu_ctrlblk rcu_ctrlblk = { - .fliplock = __SPIN_LOCK_UNLOCKED(rcu_ctrlblk.fliplock), - .completed = 0, - .rcu_try_flip_state = rcu_try_flip_idle_state, -}; - - -#ifdef CONFIG_RCU_TRACE -static char *rcu_try_flip_state_names[] = - { "idle", "waitack", "waitzero", "waitmb" }; -#endif /* #ifdef CONFIG_RCU_TRACE */ - -static cpumask_t rcu_cpu_online_map __read_mostly = CPU_MASK_NONE; - -/* - * Enum and per-CPU flag to determine when each CPU has seen - * the most recent counter flip. - */ - -enum rcu_flip_flag_values { - rcu_flip_seen, /* Steady/initial state, last flip seen. */ - /* Only GP detector can update. */ - rcu_flipped /* Flip just completed, need confirmation. */ - /* Only corresponding CPU can update. */ -}; -static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_flip_flag_values, rcu_flip_flag) - = rcu_flip_seen; - -/* - * Enum and per-CPU flag to determine when each CPU has executed the - * needed memory barrier to fence in memory references from its last RCU - * read-side critical section in the just-completed grace period. - */ - -enum rcu_mb_flag_values { - rcu_mb_done, /* Steady/initial state, no mb()s required. */ - /* Only GP detector can update. */ - rcu_mb_needed /* Flip just completed, need an mb(). 
*/ - /* Only corresponding CPU can update. */ -}; -static DEFINE_PER_CPU_SHARED_ALIGNED(enum rcu_mb_flag_values, rcu_mb_flag) - = rcu_mb_done; - -/* - * RCU_DATA_ME: find the current CPU's rcu_data structure. - * RCU_DATA_CPU: find the specified CPU's rcu_data structure. - */ -#define RCU_DATA_ME() (&__get_cpu_var(rcu_data)) -#define RCU_DATA_CPU(cpu) (&per_cpu(rcu_data, cpu)) - -/* - * Helper macro for tracing when the appropriate rcu_data is not - * cached in a local variable, but where the CPU number is so cached. - */ -#define RCU_TRACE_CPU(f, cpu) RCU_TRACE(f, &(RCU_DATA_CPU(cpu)->trace)); - -/* - * Helper macro for tracing when the appropriate rcu_data is not - * cached in a local variable. - */ -#define RCU_TRACE_ME(f) RCU_TRACE(f, &(RCU_DATA_ME()->trace)); - -/* - * Helper macro for tracing when the appropriate rcu_data is pointed - * to by a local variable. - */ -#define RCU_TRACE_RDP(f, rdp) RCU_TRACE(f, &((rdp)->trace)); - -/* - * Return the number of RCU batches processed thus far. Useful - * for debug and statistics. - */ -long rcu_batches_completed(void) -{ - return rcu_ctrlblk.completed; -} -EXPORT_SYMBOL_GPL(rcu_batches_completed); - -EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); - -void __rcu_read_lock(void) -{ - int idx; - struct task_struct *t = current; - int nesting; - - nesting = ACCESS_ONCE(t->rcu_read_lock_nesting); - if (nesting != 0) { - - /* An earlier rcu_read_lock() covers us, just count it. */ - - t->rcu_read_lock_nesting = nesting + 1; - - } else { - unsigned long flags; - - /* - * We disable interrupts for the following reasons: - * - If we get scheduling clock interrupt here, and we - * end up acking the counter flip, it's like a promise - * that we will never increment the old counter again. - * Thus we will break that promise if that - * scheduling clock interrupt happens between the time - * we pick the .completed field and the time that we - * increment our counter. - * - * - We don't want to be preempted out here. - * - * NMIs can still occur, of course, and might themselves - * contain rcu_read_lock(). - */ - - local_irq_save(flags); - - /* - * Outermost nesting of rcu_read_lock(), so increment - * the current counter for the current CPU. Use volatile - * casts to prevent the compiler from reordering. - */ - - idx = ACCESS_ONCE(rcu_ctrlblk.completed) & 0x1; - ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])++; - - /* - * Now that the per-CPU counter has been incremented, we - * are protected from races with rcu_read_lock() invoked - * from NMI handlers on this CPU. We can therefore safely - * increment the nesting counter, relieving further NMIs - * of the need to increment the per-CPU counter. - */ - - ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting + 1; - - /* - * Now that we have preventing any NMIs from storing - * to the ->rcu_flipctr_idx, we can safely use it to - * remember which counter to decrement in the matching - * rcu_read_unlock(). - */ - - ACCESS_ONCE(t->rcu_flipctr_idx) = idx; - local_irq_restore(flags); - } -} -EXPORT_SYMBOL_GPL(__rcu_read_lock); - -void __rcu_read_unlock(void) -{ - int idx; - struct task_struct *t = current; - int nesting; - - nesting = ACCESS_ONCE(t->rcu_read_lock_nesting); - if (nesting > 1) { - - /* - * We are still protected by the enclosing rcu_read_lock(), - * so simply decrement the counter. - */ - - t->rcu_read_lock_nesting = nesting - 1; - - } else { - unsigned long flags; - - /* - * Disable local interrupts to prevent the grace-period - * detection state machine from seeing us half-done. 
- * NMIs can still occur, of course, and might themselves - * contain rcu_read_lock() and rcu_read_unlock(). - */ - - local_irq_save(flags); - - /* - * Outermost nesting of rcu_read_unlock(), so we must - * decrement the current counter for the current CPU. - * This must be done carefully, because NMIs can - * occur at any point in this code, and any rcu_read_lock() - * and rcu_read_unlock() pairs in the NMI handlers - * must interact non-destructively with this code. - * Lots of volatile casts, and -very- careful ordering. - * - * Changes to this code, including this one, must be - * inspected, validated, and tested extremely carefully!!! - */ - - /* - * First, pick up the index. - */ - - idx = ACCESS_ONCE(t->rcu_flipctr_idx); - - /* - * Now that we have fetched the counter index, it is - * safe to decrement the per-task RCU nesting counter. - * After this, any interrupts or NMIs will increment and - * decrement the per-CPU counters. - */ - ACCESS_ONCE(t->rcu_read_lock_nesting) = nesting - 1; - - /* - * It is now safe to decrement this task's nesting count. - * NMIs that occur after this statement will route their - * rcu_read_lock() calls through this "else" clause, and - * will thus start incrementing the per-CPU counter on - * their own. They will also clobber ->rcu_flipctr_idx, - * but that is OK, since we have already fetched it. - */ - - ACCESS_ONCE(RCU_DATA_ME()->rcu_flipctr[idx])--; - local_irq_restore(flags); - } -} -EXPORT_SYMBOL_GPL(__rcu_read_unlock); - -/* - * If a global counter flip has occurred since the last time that we - * advanced callbacks, advance them. Hardware interrupts must be - * disabled when calling this function. - */ -static void __rcu_advance_callbacks(struct rcu_data *rdp) -{ - int cpu; - int i; - int wlc = 0; - - if (rdp->completed != rcu_ctrlblk.completed) { - if (rdp->waitlist[GP_STAGES - 1] != NULL) { - *rdp->donetail = rdp->waitlist[GP_STAGES - 1]; - rdp->donetail = rdp->waittail[GP_STAGES - 1]; - RCU_TRACE_RDP(rcupreempt_trace_move2done, rdp); - } - for (i = GP_STAGES - 2; i >= 0; i--) { - if (rdp->waitlist[i] != NULL) { - rdp->waitlist[i + 1] = rdp->waitlist[i]; - rdp->waittail[i + 1] = rdp->waittail[i]; - wlc++; - } else { - rdp->waitlist[i + 1] = NULL; - rdp->waittail[i + 1] = - &rdp->waitlist[i + 1]; - } - } - if (rdp->nextlist != NULL) { - rdp->waitlist[0] = rdp->nextlist; - rdp->waittail[0] = rdp->nexttail; - wlc++; - rdp->nextlist = NULL; - rdp->nexttail = &rdp->nextlist; - RCU_TRACE_RDP(rcupreempt_trace_move2wait, rdp); - } else { - rdp->waitlist[0] = NULL; - rdp->waittail[0] = &rdp->waitlist[0]; - } - rdp->waitlistcount = wlc; - rdp->completed = rcu_ctrlblk.completed; - } - - /* - * Check to see if this CPU needs to report that it has seen - * the most recent counter flip, thereby declaring that all - * subsequent rcu_read_lock() invocations will respect this flip. - */ - - cpu = raw_smp_processor_id(); - if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) { - smp_mb(); /* Subsequent counter accesses must see new value */ - per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen; - smp_mb(); /* Subsequent RCU read-side critical sections */ - /* seen -after- acknowledgement. */ - } -} - -/* - * Get here when RCU is idle. Decide whether we need to - * move out of idle state, and return non-zero if so. - * "Straightforward" approach for the moment, might later - * use callback-list lengths, grace-period duration, or - * some such to determine when to exit idle state. 
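(Illustrative aside, not part of the patch text: the __rcu_read_lock()/__rcu_read_unlock() pair above is what the rcu_read_lock()/rcu_read_unlock() markers map to in this preemptible implementation. A typical read side, with gp again assumed purely for illustration, looks like:)

#include <linux/rcupdate.h>

struct foo {
	int data;
};

extern struct foo *gp;			/* hypothetical RCU-protected pointer */

static int foo_read_data(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock();		/* begin read-side critical section */
	p = rcu_dereference(gp);	/* fetch the current version */
	if (p)
		val = p->data;
	rcu_read_unlock();		/* p must not be dereferenced after this */
	return val;
}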
- * Might also need a pre-idle test that does not acquire - * the lock, but let's get the simple case working first... - */ - -static int -rcu_try_flip_idle(void) -{ - int cpu; - - RCU_TRACE_ME(rcupreempt_trace_try_flip_i1); - if (!rcu_pending(smp_processor_id())) { - RCU_TRACE_ME(rcupreempt_trace_try_flip_ie1); - return 0; - } - - /* - * Do the flip. - */ - - RCU_TRACE_ME(rcupreempt_trace_try_flip_g1); - rcu_ctrlblk.completed++; /* stands in for rcu_try_flip_g2 */ - - /* - * Need a memory barrier so that other CPUs see the new - * counter value before they see the subsequent change of all - * the rcu_flip_flag instances to rcu_flipped. - */ - - smp_mb(); /* see above block comment. */ - - /* Now ask each CPU for acknowledgement of the flip. */ - - for_each_cpu_mask(cpu, rcu_cpu_online_map) - per_cpu(rcu_flip_flag, cpu) = rcu_flipped; - - return 1; -} - -/* - * Wait for CPUs to acknowledge the flip. - */ - -static int -rcu_try_flip_waitack(void) -{ - int cpu; - - RCU_TRACE_ME(rcupreempt_trace_try_flip_a1); - for_each_cpu_mask(cpu, rcu_cpu_online_map) - if (per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) { - RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1); - return 0; - } - - /* - * Make sure our checks above don't bleed into subsequent - * waiting for the sum of the counters to reach zero. - */ - - smp_mb(); /* see above block comment. */ - RCU_TRACE_ME(rcupreempt_trace_try_flip_a2); - return 1; -} - -/* - * Wait for collective ``last'' counter to reach zero, - * then tell all CPUs to do an end-of-grace-period memory barrier. - */ - -static int -rcu_try_flip_waitzero(void) -{ - int cpu; - int lastidx = !(rcu_ctrlblk.completed & 0x1); - int sum = 0; - - /* Check to see if the sum of the "last" counters is zero. */ - - RCU_TRACE_ME(rcupreempt_trace_try_flip_z1); - for_each_cpu_mask(cpu, rcu_cpu_online_map) - sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx]; - if (sum != 0) { - RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1); - return 0; - } - - /* - * This ensures that the other CPUs see the call for - * memory barriers -after- the sum to zero has been - * detected here - */ - smp_mb(); /* ^^^^^^^^^^^^ */ - - /* Call for a memory barrier from each CPU. */ - for_each_cpu_mask(cpu, rcu_cpu_online_map) - per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed; - - RCU_TRACE_ME(rcupreempt_trace_try_flip_z2); - return 1; -} - -/* - * Wait for all CPUs to do their end-of-grace-period memory barrier. - * Return 0 once all CPUs have done so. - */ - -static int -rcu_try_flip_waitmb(void) -{ - int cpu; - - RCU_TRACE_ME(rcupreempt_trace_try_flip_m1); - for_each_cpu_mask(cpu, rcu_cpu_online_map) - if (per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) { - RCU_TRACE_ME(rcupreempt_trace_try_flip_me1); - return 0; - } - - smp_mb(); /* Ensure that the above checks precede any following flip. */ - RCU_TRACE_ME(rcupreempt_trace_try_flip_m2); - return 1; -} - -/* - * Attempt a single flip of the counters. Remember, a single flip does - * -not- constitute a grace period. Instead, the interval between - * at least GP_STAGES consecutive flips is a grace period. - * - * If anyone is nuts enough to run this CONFIG_PREEMPT_RCU implementation - * on a large SMP, they might want to use a hierarchical organization of - * the per-CPU-counter pairs. 
- */ -static void rcu_try_flip(void) -{ - unsigned long flags; - - RCU_TRACE_ME(rcupreempt_trace_try_flip_1); - if (unlikely(!spin_trylock_irqsave(&rcu_ctrlblk.fliplock, flags))) { - RCU_TRACE_ME(rcupreempt_trace_try_flip_e1); - return; - } - - /* - * Take the next transition(s) through the RCU grace-period - * flip-counter state machine. - */ - - switch (rcu_ctrlblk.rcu_try_flip_state) { - case rcu_try_flip_idle_state: - if (rcu_try_flip_idle()) - rcu_ctrlblk.rcu_try_flip_state = - rcu_try_flip_waitack_state; - break; - case rcu_try_flip_waitack_state: - if (rcu_try_flip_waitack()) - rcu_ctrlblk.rcu_try_flip_state = - rcu_try_flip_waitzero_state; - break; - case rcu_try_flip_waitzero_state: - if (rcu_try_flip_waitzero()) - rcu_ctrlblk.rcu_try_flip_state = - rcu_try_flip_waitmb_state; - break; - case rcu_try_flip_waitmb_state: - if (rcu_try_flip_waitmb()) - rcu_ctrlblk.rcu_try_flip_state = - rcu_try_flip_idle_state; - } - spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); -} - -/* - * Check to see if this CPU needs to do a memory barrier in order to - * ensure that any prior RCU read-side critical sections have committed - * their counter manipulations and critical-section memory references - * before declaring the grace period to be completed. - */ -static void rcu_check_mb(int cpu) -{ - if (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed) { - smp_mb(); /* Ensure RCU read-side accesses are visible. */ - per_cpu(rcu_mb_flag, cpu) = rcu_mb_done; - } -} - -void rcu_check_callbacks(int cpu, int user) -{ - unsigned long flags; - struct rcu_data *rdp = RCU_DATA_CPU(cpu); - - rcu_check_mb(cpu); - if (rcu_ctrlblk.completed == rdp->completed) - rcu_try_flip(); - spin_lock_irqsave(&rdp->lock, flags); - RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp); - __rcu_advance_callbacks(rdp); - if (rdp->donelist == NULL) { - spin_unlock_irqrestore(&rdp->lock, flags); - } else { - spin_unlock_irqrestore(&rdp->lock, flags); - raise_softirq(RCU_SOFTIRQ); - } -} - -/* - * Needed by dynticks, to make sure all RCU processing has finished - * when we go idle: - */ -void rcu_advance_callbacks(int cpu, int user) -{ - unsigned long flags; - struct rcu_data *rdp = RCU_DATA_CPU(cpu); - - if (rcu_ctrlblk.completed == rdp->completed) { - rcu_try_flip(); - if (rcu_ctrlblk.completed == rdp->completed) - return; - } - spin_lock_irqsave(&rdp->lock, flags); - RCU_TRACE_RDP(rcupreempt_trace_check_callbacks, rdp); - __rcu_advance_callbacks(rdp); - spin_unlock_irqrestore(&rdp->lock, flags); -} - -#ifdef CONFIG_HOTPLUG_CPU -#define rcu_offline_cpu_enqueue(srclist, srctail, dstlist, dsttail) do { \ - *dsttail = srclist; \ - if (srclist != NULL) { \ - dsttail = srctail; \ - srclist = NULL; \ - srctail = &srclist;\ - } \ - } while (0) - -void rcu_offline_cpu(int cpu) -{ - int i; - struct rcu_head *list = NULL; - unsigned long flags; - struct rcu_data *rdp = RCU_DATA_CPU(cpu); - struct rcu_head **tail = &list; - - /* - * Remove all callbacks from the newly dead CPU, retaining order. - * Otherwise rcu_barrier() will fail - */ - - spin_lock_irqsave(&rdp->lock, flags); - rcu_offline_cpu_enqueue(rdp->donelist, rdp->donetail, list, tail); - for (i = GP_STAGES - 1; i >= 0; i--) - rcu_offline_cpu_enqueue(rdp->waitlist[i], rdp->waittail[i], - list, tail); - rcu_offline_cpu_enqueue(rdp->nextlist, rdp->nexttail, list, tail); - spin_unlock_irqrestore(&rdp->lock, flags); - rdp->waitlistcount = 0; - - /* Disengage the newly dead CPU from the grace-period computation. 
*/ - - spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags); - rcu_check_mb(cpu); - if (per_cpu(rcu_flip_flag, cpu) == rcu_flipped) { - smp_mb(); /* Subsequent counter accesses must see new value */ - per_cpu(rcu_flip_flag, cpu) = rcu_flip_seen; - smp_mb(); /* Subsequent RCU read-side critical sections */ - /* seen -after- acknowledgement. */ - } - - RCU_DATA_ME()->rcu_flipctr[0] += RCU_DATA_CPU(cpu)->rcu_flipctr[0]; - RCU_DATA_ME()->rcu_flipctr[1] += RCU_DATA_CPU(cpu)->rcu_flipctr[1]; - - RCU_DATA_CPU(cpu)->rcu_flipctr[0] = 0; - RCU_DATA_CPU(cpu)->rcu_flipctr[1] = 0; - - cpu_clear(cpu, rcu_cpu_online_map); - - spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); - - /* - * Place the removed callbacks on the current CPU's queue. - * Make them all start a new grace period: simple approach, - * in theory could starve a given set of callbacks, but - * you would need to be doing some serious CPU hotplugging - * to make this happen. If this becomes a problem, adding - * a synchronize_rcu() to the hotplug path would be a simple - * fix. - */ - - rdp = RCU_DATA_ME(); - spin_lock_irqsave(&rdp->lock, flags); - *rdp->nexttail = list; - if (list) - rdp->nexttail = tail; - spin_unlock_irqrestore(&rdp->lock, flags); -} - -void __devinit rcu_online_cpu(int cpu) -{ - unsigned long flags; - - spin_lock_irqsave(&rcu_ctrlblk.fliplock, flags); - cpu_set(cpu, rcu_cpu_online_map); - spin_unlock_irqrestore(&rcu_ctrlblk.fliplock, flags); -} - -#else /* #ifdef CONFIG_HOTPLUG_CPU */ - -void rcu_offline_cpu(int cpu) -{ -} - -void __devinit rcu_online_cpu(int cpu) -{ -} - -#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ - -static void rcu_process_callbacks(struct softirq_action *unused) -{ - unsigned long flags; - struct rcu_head *next, *list; - struct rcu_data *rdp = RCU_DATA_ME(); - - spin_lock_irqsave(&rdp->lock, flags); - list = rdp->donelist; - if (list == NULL) { - spin_unlock_irqrestore(&rdp->lock, flags); - return; - } - rdp->donelist = NULL; - rdp->donetail = &rdp->donelist; - RCU_TRACE_RDP(rcupreempt_trace_done_remove, rdp); - spin_unlock_irqrestore(&rdp->lock, flags); - while (list) { - next = list->next; - list->func(list); - list = next; - RCU_TRACE_ME(rcupreempt_trace_invoke); - } -} - -void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) -{ - unsigned long flags; - struct rcu_data *rdp; - - head->func = func; - head->next = NULL; - local_irq_save(flags); - rdp = RCU_DATA_ME(); - spin_lock(&rdp->lock); - __rcu_advance_callbacks(rdp); - *rdp->nexttail = head; - rdp->nexttail = &head->next; - RCU_TRACE_RDP(rcupreempt_trace_next_add, rdp); - spin_unlock(&rdp->lock); - local_irq_restore(flags); -} -EXPORT_SYMBOL_GPL(call_rcu); - -/* - * Wait until all currently running preempt_disable() code segments - * (including hardware-irq-disable segments) complete. Note that - * in -rt this does -not- necessarily result in all currently executing - * interrupt -handlers- having completed. - */ -void __synchronize_sched(void) -{ - cpumask_t oldmask; - int cpu; - - if (sched_getaffinity(0, &oldmask) < 0) - oldmask = cpu_possible_map; - for_each_online_cpu(cpu) { - sched_setaffinity(0, cpumask_of_cpu(cpu)); - schedule(); - } - sched_setaffinity(0, oldmask); -} -EXPORT_SYMBOL_GPL(__synchronize_sched); - -/* - * Check to see if any future RCU-related work will need to be done - * by the current CPU, even if none need be done immediately, returning - * 1 if so. Assumes that notifiers would take care of handling any - * outstanding requests from the RCU core. 
- * - * This function is part of the RCU implementation; it is -not- - * an exported member of the RCU API. - */ -int rcu_needs_cpu(int cpu) -{ - struct rcu_data *rdp = RCU_DATA_CPU(cpu); - - return (rdp->donelist != NULL || - !!rdp->waitlistcount || - rdp->nextlist != NULL); -} - -int rcu_pending(int cpu) -{ - struct rcu_data *rdp = RCU_DATA_CPU(cpu); - - /* The CPU has at least one callback queued somewhere. */ - - if (rdp->donelist != NULL || - !!rdp->waitlistcount || - rdp->nextlist != NULL) - return 1; - - /* The RCU core needs an acknowledgement from this CPU. */ - - if ((per_cpu(rcu_flip_flag, cpu) == rcu_flipped) || - (per_cpu(rcu_mb_flag, cpu) == rcu_mb_needed)) - return 1; - - /* This CPU has fallen behind the global grace-period number. */ - - if (rdp->completed != rcu_ctrlblk.completed) - return 1; - - /* Nothing needed from this CPU. */ - - return 0; -} - -static int __cpuinit rcu_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - long cpu = (long)hcpu; - - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - rcu_online_cpu(cpu); - break; - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - case CPU_DEAD: - case CPU_DEAD_FROZEN: - rcu_offline_cpu(cpu); - break; - default: - break; - } - return NOTIFY_OK; -} - -static struct notifier_block __cpuinitdata rcu_nb = { - .notifier_call = rcu_cpu_notify, -}; - -void __init __rcu_init(void) -{ - int cpu; - int i; - struct rcu_data *rdp; - - printk(KERN_NOTICE "Preemptible RCU implementation.\n"); - for_each_possible_cpu(cpu) { - rdp = RCU_DATA_CPU(cpu); - spin_lock_init(&rdp->lock); - rdp->completed = 0; - rdp->waitlistcount = 0; - rdp->nextlist = NULL; - rdp->nexttail = &rdp->nextlist; - for (i = 0; i < GP_STAGES; i++) { - rdp->waitlist[i] = NULL; - rdp->waittail[i] = &rdp->waitlist[i]; - } - rdp->donelist = NULL; - rdp->donetail = &rdp->donelist; - rdp->rcu_flipctr[0] = 0; - rdp->rcu_flipctr[1] = 0; - } - register_cpu_notifier(&rcu_nb); - - /* - * We don't need protection against CPU-Hotplug here - * since - * a) If a CPU comes online while we are iterating over the - * cpu_online_map below, we would only end up making a - * duplicate call to rcu_online_cpu() which sets the corresponding - * CPU's mask in the rcu_cpu_online_map. - * - * b) A CPU cannot go offline at this point in time since the user - * does not have access to the sysfs interface, nor do we - * suspend the system. - */ - for_each_online_cpu(cpu) - rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long) cpu); - - open_softirq(RCU_SOFTIRQ, rcu_process_callbacks, NULL); -} - -/* - * Deprecated, use synchronize_rcu() or synchronize_sched() instead. 
- */
-void synchronize_kernel(void)
-{
-	synchronize_rcu();
-}
-
-#ifdef CONFIG_RCU_TRACE
-long *rcupreempt_flipctr(int cpu)
-{
-	return &RCU_DATA_CPU(cpu)->rcu_flipctr[0];
-}
-EXPORT_SYMBOL_GPL(rcupreempt_flipctr);
-
-int rcupreempt_flip_flag(int cpu)
-{
-	return per_cpu(rcu_flip_flag, cpu);
-}
-EXPORT_SYMBOL_GPL(rcupreempt_flip_flag);
-
-int rcupreempt_mb_flag(int cpu)
-{
-	return per_cpu(rcu_mb_flag, cpu);
-}
-EXPORT_SYMBOL_GPL(rcupreempt_mb_flag);
-
-char *rcupreempt_try_flip_state_name(void)
-{
-	return rcu_try_flip_state_names[rcu_ctrlblk.rcu_try_flip_state];
-}
-EXPORT_SYMBOL_GPL(rcupreempt_try_flip_state_name);
-
-struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu)
-{
-	struct rcu_data *rdp = RCU_DATA_CPU(cpu);
-
-	return &rdp->trace;
-}
-EXPORT_SYMBOL_GPL(rcupreempt_trace_cpu);
-
-#endif /* #ifdef RCU_TRACE */
diff --git a/trunk/kernel/rcupreempt_trace.c b/trunk/kernel/rcupreempt_trace.c
deleted file mode 100644
index 49ac4947af24..000000000000
--- a/trunk/kernel/rcupreempt_trace.c
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * Read-Copy Update tracing for realtime implementation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- * - * Copyright IBM Corporation, 2006 - * - * Papers: http://www.rdrop.com/users/paulmck/RCU - * - * For detailed explanation of Read-Copy Update mechanism see - - * Documentation/RCU/ *.txt - * - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static struct mutex rcupreempt_trace_mutex; -static char *rcupreempt_trace_buf; -#define RCUPREEMPT_TRACE_BUF_SIZE 4096 - -void rcupreempt_trace_move2done(struct rcupreempt_trace *trace) -{ - trace->done_length += trace->wait_length; - trace->done_add += trace->wait_length; - trace->wait_length = 0; -} -void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace) -{ - trace->wait_length += trace->next_length; - trace->wait_add += trace->next_length; - trace->next_length = 0; -} -void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace) -{ - atomic_inc(&trace->rcu_try_flip_1); -} -void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace) -{ - atomic_inc(&trace->rcu_try_flip_e1); -} -void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace) -{ - trace->rcu_try_flip_i1++; -} -void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace) -{ - trace->rcu_try_flip_ie1++; -} -void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace) -{ - trace->rcu_try_flip_g1++; -} -void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace) -{ - trace->rcu_try_flip_a1++; -} -void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace) -{ - trace->rcu_try_flip_ae1++; -} -void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace) -{ - trace->rcu_try_flip_a2++; -} -void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace) -{ - trace->rcu_try_flip_z1++; -} -void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace) -{ - trace->rcu_try_flip_ze1++; -} -void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace) -{ - trace->rcu_try_flip_z2++; -} -void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace) -{ - trace->rcu_try_flip_m1++; -} -void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace) -{ - trace->rcu_try_flip_me1++; -} -void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace) -{ - trace->rcu_try_flip_m2++; -} -void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace) -{ - trace->rcu_check_callbacks++; -} -void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace) -{ - trace->done_remove += trace->done_length; - trace->done_length = 0; -} -void rcupreempt_trace_invoke(struct rcupreempt_trace *trace) -{ - atomic_inc(&trace->done_invoked); -} -void rcupreempt_trace_next_add(struct rcupreempt_trace *trace) -{ - trace->next_add++; - trace->next_length++; -} - -static void rcupreempt_trace_sum(struct rcupreempt_trace *sp) -{ - struct rcupreempt_trace *cp; - int cpu; - - memset(sp, 0, sizeof(*sp)); - for_each_possible_cpu(cpu) { - cp = rcupreempt_trace_cpu(cpu); - sp->next_length += cp->next_length; - sp->next_add += cp->next_add; - sp->wait_length += cp->wait_length; - sp->wait_add += cp->wait_add; - sp->done_length += cp->done_length; - sp->done_add += cp->done_add; - sp->done_remove += cp->done_remove; - atomic_set(&sp->done_invoked, atomic_read(&cp->done_invoked)); - sp->rcu_check_callbacks += cp->rcu_check_callbacks; - atomic_set(&sp->rcu_try_flip_1, - atomic_read(&cp->rcu_try_flip_1)); - atomic_set(&sp->rcu_try_flip_e1, - atomic_read(&cp->rcu_try_flip_e1)); - sp->rcu_try_flip_i1 += 
cp->rcu_try_flip_i1; - sp->rcu_try_flip_ie1 += cp->rcu_try_flip_ie1; - sp->rcu_try_flip_g1 += cp->rcu_try_flip_g1; - sp->rcu_try_flip_a1 += cp->rcu_try_flip_a1; - sp->rcu_try_flip_ae1 += cp->rcu_try_flip_ae1; - sp->rcu_try_flip_a2 += cp->rcu_try_flip_a2; - sp->rcu_try_flip_z1 += cp->rcu_try_flip_z1; - sp->rcu_try_flip_ze1 += cp->rcu_try_flip_ze1; - sp->rcu_try_flip_z2 += cp->rcu_try_flip_z2; - sp->rcu_try_flip_m1 += cp->rcu_try_flip_m1; - sp->rcu_try_flip_me1 += cp->rcu_try_flip_me1; - sp->rcu_try_flip_m2 += cp->rcu_try_flip_m2; - } -} - -static ssize_t rcustats_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) -{ - struct rcupreempt_trace trace; - ssize_t bcount; - int cnt = 0; - - rcupreempt_trace_sum(&trace); - mutex_lock(&rcupreempt_trace_mutex); - snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE - cnt, - "ggp=%ld rcc=%ld\n", - rcu_batches_completed(), - trace.rcu_check_callbacks); - snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE - cnt, - "na=%ld nl=%ld wa=%ld wl=%ld da=%ld dl=%ld dr=%ld di=%d\n" - "1=%d e1=%d i1=%ld ie1=%ld g1=%ld a1=%ld ae1=%ld a2=%ld\n" - "z1=%ld ze1=%ld z2=%ld m1=%ld me1=%ld m2=%ld\n", - - trace.next_add, trace.next_length, - trace.wait_add, trace.wait_length, - trace.done_add, trace.done_length, - trace.done_remove, atomic_read(&trace.done_invoked), - atomic_read(&trace.rcu_try_flip_1), - atomic_read(&trace.rcu_try_flip_e1), - trace.rcu_try_flip_i1, trace.rcu_try_flip_ie1, - trace.rcu_try_flip_g1, - trace.rcu_try_flip_a1, trace.rcu_try_flip_ae1, - trace.rcu_try_flip_a2, - trace.rcu_try_flip_z1, trace.rcu_try_flip_ze1, - trace.rcu_try_flip_z2, - trace.rcu_try_flip_m1, trace.rcu_try_flip_me1, - trace.rcu_try_flip_m2); - bcount = simple_read_from_buffer(buffer, count, ppos, - rcupreempt_trace_buf, strlen(rcupreempt_trace_buf)); - mutex_unlock(&rcupreempt_trace_mutex); - return bcount; -} - -static ssize_t rcugp_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) -{ - long oldgp = rcu_batches_completed(); - ssize_t bcount; - - mutex_lock(&rcupreempt_trace_mutex); - synchronize_rcu(); - snprintf(rcupreempt_trace_buf, RCUPREEMPT_TRACE_BUF_SIZE, - "oldggp=%ld newggp=%ld\n", oldgp, rcu_batches_completed()); - bcount = simple_read_from_buffer(buffer, count, ppos, - rcupreempt_trace_buf, strlen(rcupreempt_trace_buf)); - mutex_unlock(&rcupreempt_trace_mutex); - return bcount; -} - -static ssize_t rcuctrs_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) -{ - int cnt = 0; - int cpu; - int f = rcu_batches_completed() & 0x1; - ssize_t bcount; - - mutex_lock(&rcupreempt_trace_mutex); - - cnt += snprintf(&rcupreempt_trace_buf[cnt], RCUPREEMPT_TRACE_BUF_SIZE, - "CPU last cur F M\n"); - for_each_online_cpu(cpu) { - long *flipctr = rcupreempt_flipctr(cpu); - cnt += snprintf(&rcupreempt_trace_buf[cnt], - RCUPREEMPT_TRACE_BUF_SIZE - cnt, - "%3d %4ld %3ld %d %d\n", - cpu, - flipctr[!f], - flipctr[f], - rcupreempt_flip_flag(cpu), - rcupreempt_mb_flag(cpu)); - } - cnt += snprintf(&rcupreempt_trace_buf[cnt], - RCUPREEMPT_TRACE_BUF_SIZE - cnt, - "ggp = %ld, state = %s\n", - rcu_batches_completed(), - rcupreempt_try_flip_state_name()); - cnt += snprintf(&rcupreempt_trace_buf[cnt], - RCUPREEMPT_TRACE_BUF_SIZE - cnt, - "\n"); - bcount = simple_read_from_buffer(buffer, count, ppos, - rcupreempt_trace_buf, strlen(rcupreempt_trace_buf)); - mutex_unlock(&rcupreempt_trace_mutex); - return bcount; -} - -static struct file_operations rcustats_fops = { - .owner = THIS_MODULE, - .read = 
rcustats_read, -}; - -static struct file_operations rcugp_fops = { - .owner = THIS_MODULE, - .read = rcugp_read, -}; - -static struct file_operations rcuctrs_fops = { - .owner = THIS_MODULE, - .read = rcuctrs_read, -}; - -static struct dentry *rcudir, *statdir, *ctrsdir, *gpdir; -static int rcupreempt_debugfs_init(void) -{ - rcudir = debugfs_create_dir("rcu", NULL); - if (!rcudir) - goto out; - statdir = debugfs_create_file("rcustats", 0444, rcudir, - NULL, &rcustats_fops); - if (!statdir) - goto free_out; - - gpdir = debugfs_create_file("rcugp", 0444, rcudir, NULL, &rcugp_fops); - if (!gpdir) - goto free_out; - - ctrsdir = debugfs_create_file("rcuctrs", 0444, rcudir, - NULL, &rcuctrs_fops); - if (!ctrsdir) - goto free_out; - return 0; -free_out: - if (statdir) - debugfs_remove(statdir); - if (gpdir) - debugfs_remove(gpdir); - debugfs_remove(rcudir); -out: - return 1; -} - -static int __init rcupreempt_trace_init(void) -{ - mutex_init(&rcupreempt_trace_mutex); - rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL); - if (!rcupreempt_trace_buf) - return 1; - return rcupreempt_debugfs_init(); -} - -static void __exit rcupreempt_trace_cleanup(void) -{ - debugfs_remove(statdir); - debugfs_remove(gpdir); - debugfs_remove(ctrsdir); - debugfs_remove(rcudir); - kfree(rcupreempt_trace_buf); -} - - -module_init(rcupreempt_trace_init); -module_exit(rcupreempt_trace_cleanup); diff --git a/trunk/kernel/rcutorture.c b/trunk/kernel/rcutorture.c index fd599829e72a..c3e165c2318f 100644 --- a/trunk/kernel/rcutorture.c +++ b/trunk/kernel/rcutorture.c @@ -726,11 +726,11 @@ static void rcu_torture_shuffle_tasks(void) cpumask_t tmp_mask = CPU_MASK_ALL; int i; - get_online_cpus(); + lock_cpu_hotplug(); /* No point in shuffling if there is only one online CPU (ex: UP) */ if (num_online_cpus() == 1) { - put_online_cpus(); + unlock_cpu_hotplug(); return; } @@ -762,7 +762,7 @@ static void rcu_torture_shuffle_tasks(void) else rcu_idle_cpu--; - put_online_cpus(); + unlock_cpu_hotplug(); } /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c index 524285e46fa7..e76b11ca6df3 100644 --- a/trunk/kernel/sched.c +++ b/trunk/kernel/sched.c @@ -22,8 +22,6 @@ * by Peter Williams * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri - * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, - * Thomas Gleixner, Mike Kravetz */ #include @@ -65,7 +63,6 @@ #include #include #include -#include #include #include @@ -99,9 +96,10 @@ unsigned long long __attribute__((weak)) sched_clock(void) #define MAX_USER_PRIO (USER_PRIO(MAX_PRIO)) /* - * Helpers for converting nanosecond timing to jiffy resolution + * Some helpers for converting nanosecond timing to jiffy resolution */ #define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ)) +#define JIFFIES_TO_NS(TIME) ((TIME) * (NSEC_PER_SEC / HZ)) #define NICE_0_LOAD SCHED_LOAD_SCALE #define NICE_0_SHIFT SCHED_LOAD_SHIFT @@ -161,8 +159,6 @@ struct rt_prio_array { struct cfs_rq; -static LIST_HEAD(task_groups); - /* task group related information */ struct task_group { #ifdef CONFIG_FAIR_CGROUP_SCHED @@ -172,50 +168,10 @@ struct task_group { struct sched_entity **se; /* runqueue "owned" by this group on each cpu */ struct cfs_rq **cfs_rq; - - struct sched_rt_entity **rt_se; - struct rt_rq **rt_rq; - - unsigned int rt_ratio; - - /* - * shares assigned to a task group governs how much of cpu 
bandwidth - * is allocated to the group. The more shares a group has, the more is - * the cpu bandwidth allocated to it. - * - * For ex, lets say that there are three task groups, A, B and C which - * have been assigned shares 1000, 2000 and 3000 respectively. Then, - * cpu bandwidth allocated by the scheduler to task groups A, B and C - * should be: - * - * Bw(A) = 1000/(1000+2000+3000) * 100 = 16.66% - * Bw(B) = 2000/(1000+2000+3000) * 100 = 33.33% - * Bw(C) = 3000/(1000+2000+3000) * 100 = 50% - * - * The weight assigned to a task group's schedulable entities on every - * cpu (task_group.se[a_cpu]->load.weight) is derived from the task - * group's shares. For ex: lets say that task group A has been - * assigned shares of 1000 and there are two CPUs in a system. Then, - * - * tg_A->se[0]->load.weight = tg_A->se[1]->load.weight = 1000; - * - * Note: It's not necessary that each of a task's group schedulable - * entity have the same weight on all CPUs. If the group - * has 2 of its tasks on CPU0 and 1 task on CPU1, then a - * better distribution of weight could be: - * - * tg_A->se[0]->load.weight = 2/3 * 2000 = 1333 - * tg_A->se[1]->load.weight = 1/2 * 2000 = 667 - * - * rebalance_shares() is responsible for distributing the shares of a - * task groups like this among the group's schedulable entities across - * cpus. - * - */ unsigned long shares; - + /* spinlock to serialize modification to shares */ + spinlock_t lock; struct rcu_head rcu; - struct list_head list; }; /* Default task group's sched entity on each cpu */ @@ -223,51 +179,24 @@ static DEFINE_PER_CPU(struct sched_entity, init_sched_entity); /* Default task group's cfs_rq on each cpu */ static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp; -static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); -static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp; - static struct sched_entity *init_sched_entity_p[NR_CPUS]; static struct cfs_rq *init_cfs_rq_p[NR_CPUS]; -static struct sched_rt_entity *init_sched_rt_entity_p[NR_CPUS]; -static struct rt_rq *init_rt_rq_p[NR_CPUS]; - -/* task_group_mutex serializes add/remove of task groups and also changes to - * a task group's cpu shares. - */ -static DEFINE_MUTEX(task_group_mutex); - -/* doms_cur_mutex serializes access to doms_cur[] array */ -static DEFINE_MUTEX(doms_cur_mutex); - -#ifdef CONFIG_SMP -/* kernel thread that runs rebalance_shares() periodically */ -static struct task_struct *lb_monitor_task; -static int load_balance_monitor(void *unused); -#endif - -static void set_se_shares(struct sched_entity *se, unsigned long shares); - /* Default task group. * Every task in system belong to this group at bootup. 
*/ struct task_group init_task_group = { - .se = init_sched_entity_p, + .se = init_sched_entity_p, .cfs_rq = init_cfs_rq_p, - - .rt_se = init_sched_rt_entity_p, - .rt_rq = init_rt_rq_p, }; #ifdef CONFIG_FAIR_USER_SCHED -# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) +# define INIT_TASK_GRP_LOAD 2*NICE_0_LOAD #else -# define INIT_TASK_GROUP_LOAD NICE_0_LOAD +# define INIT_TASK_GRP_LOAD NICE_0_LOAD #endif -#define MIN_GROUP_SHARES 2 - -static int init_task_group_load = INIT_TASK_GROUP_LOAD; +static int init_task_group_load = INIT_TASK_GRP_LOAD; /* return group to which a task belongs */ static inline struct task_group *task_group(struct task_struct *p) @@ -286,42 +215,15 @@ static inline struct task_group *task_group(struct task_struct *p) } /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ -static inline void set_task_rq(struct task_struct *p, unsigned int cpu) +static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; p->se.parent = task_group(p)->se[cpu]; - - p->rt.rt_rq = task_group(p)->rt_rq[cpu]; - p->rt.parent = task_group(p)->rt_se[cpu]; -} - -static inline void lock_task_group_list(void) -{ - mutex_lock(&task_group_mutex); -} - -static inline void unlock_task_group_list(void) -{ - mutex_unlock(&task_group_mutex); -} - -static inline void lock_doms_cur(void) -{ - mutex_lock(&doms_cur_mutex); -} - -static inline void unlock_doms_cur(void) -{ - mutex_unlock(&doms_cur_mutex); } #else -static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } -static inline void lock_task_group_list(void) { } -static inline void unlock_task_group_list(void) { } -static inline void lock_doms_cur(void) { } -static inline void unlock_doms_cur(void) { } +static inline void set_task_cfs_rq(struct task_struct *p, unsigned int cpu) { } #endif /* CONFIG_FAIR_GROUP_SCHED */ @@ -362,56 +264,10 @@ struct cfs_rq { /* Real-Time classes' related field in a runqueue: */ struct rt_rq { struct rt_prio_array active; - unsigned long rt_nr_running; -#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED - int highest_prio; /* highest queued rt task prio */ -#endif -#ifdef CONFIG_SMP - unsigned long rt_nr_migratory; - int overloaded; -#endif - int rt_throttled; - u64 rt_time; - -#ifdef CONFIG_FAIR_GROUP_SCHED - struct rq *rq; - struct list_head leaf_rt_rq_list; - struct task_group *tg; - struct sched_rt_entity *rt_se; -#endif + int rt_load_balance_idx; + struct list_head *rt_load_balance_head, *rt_load_balance_curr; }; -#ifdef CONFIG_SMP - -/* - * We add the notion of a root-domain which will be used to define per-domain - * variables. Each exclusive cpuset essentially defines an island domain by - * fully partitioning the member cpus from any other cpuset. Whenever a new - * exclusive cpuset is created, we also create and attach a new root-domain - * object. - * - */ -struct root_domain { - atomic_t refcount; - cpumask_t span; - cpumask_t online; - - /* - * The "RT overload" flag: it gets set if a CPU has more than - * one runnable RT task. - */ - cpumask_t rto_mask; - atomic_t rto_count; -}; - -/* - * By default the system creates a single root-domain with all cpus as - * members (mimicking the global state we have today). - */ -static struct root_domain def_root_domain; - -#endif - /* * This is the main, per-CPU runqueue data structure. 
* @@ -440,15 +296,11 @@ struct rq { u64 nr_switches; struct cfs_rq cfs; - struct rt_rq rt; - u64 rt_period_expire; - int rt_throttled; - #ifdef CONFIG_FAIR_GROUP_SCHED /* list of leaf cfs_rq on this cpu: */ struct list_head leaf_cfs_rq_list; - struct list_head leaf_rt_rq_list; #endif + struct rt_rq rt; /* * This is part of a global counter where only the total sum @@ -465,7 +317,7 @@ struct rq { u64 clock, prev_clock_raw; s64 clock_max_delta; - unsigned int clock_warps, clock_overflows, clock_underflows; + unsigned int clock_warps, clock_overflows; u64 idle_clock; unsigned int clock_deep_idle_events; u64 tick_timestamp; @@ -473,7 +325,6 @@ struct rq { atomic_t nr_iowait; #ifdef CONFIG_SMP - struct root_domain *rd; struct sched_domain *sd; /* For active balancing */ @@ -486,12 +337,6 @@ struct rq { struct list_head migration_queue; #endif -#ifdef CONFIG_SCHED_HRTICK - unsigned long hrtick_flags; - ktime_t hrtick_expire; - struct hrtimer hrtick_timer; -#endif - #ifdef CONFIG_SCHEDSTATS /* latency stats */ struct sched_info rq_sched_info; @@ -518,6 +363,7 @@ struct rq { }; static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); +static DEFINE_MUTEX(sched_hotcpu_mutex); static inline void check_preempt_curr(struct rq *rq, struct task_struct *p) { @@ -595,23 +441,6 @@ static void update_rq_clock(struct rq *rq) #define task_rq(p) cpu_rq(task_cpu(p)) #define cpu_curr(cpu) (cpu_rq(cpu)->curr) -unsigned long rt_needs_cpu(int cpu) -{ - struct rq *rq = cpu_rq(cpu); - u64 delta; - - if (!rq->rt_throttled) - return 0; - - if (rq->clock > rq->rt_period_expire) - return 1; - - delta = rq->rt_period_expire - rq->clock; - do_div(delta, NSEC_PER_SEC / HZ); - - return (unsigned long)delta; -} - /* * Tunables that become constants when CONFIG_SCHED_DEBUG is off: */ @@ -630,8 +459,6 @@ enum { SCHED_FEAT_START_DEBIT = 4, SCHED_FEAT_TREE_AVG = 8, SCHED_FEAT_APPROX_AVG = 16, - SCHED_FEAT_HRTICK = 32, - SCHED_FEAT_DOUBLE_TICK = 64, }; const_debug unsigned int sysctl_sched_features = @@ -639,9 +466,7 @@ const_debug unsigned int sysctl_sched_features = SCHED_FEAT_WAKEUP_PREEMPT * 1 | SCHED_FEAT_START_DEBIT * 1 | SCHED_FEAT_TREE_AVG * 0 | - SCHED_FEAT_APPROX_AVG * 0 | - SCHED_FEAT_HRTICK * 1 | - SCHED_FEAT_DOUBLE_TICK * 0; + SCHED_FEAT_APPROX_AVG * 0; #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x) @@ -651,21 +476,6 @@ const_debug unsigned int sysctl_sched_features = */ const_debug unsigned int sysctl_sched_nr_migrate = 32; -/* - * period over which we measure -rt task cpu usage in ms. - * default: 1s - */ -const_debug unsigned int sysctl_sched_rt_period = 1000; - -#define SCHED_RT_FRAC_SHIFT 16 -#define SCHED_RT_FRAC (1UL << SCHED_RT_FRAC_SHIFT) - -/* - * ratio of time -rt tasks may consume. 
- * default: 95% - */ -const_debug unsigned int sysctl_sched_rt_ratio = 62259; - /* * For kernel-internal use: high-speed (but slightly incorrect) per-cpu * clock constructed from sched_clock(): @@ -858,6 +668,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) struct rq *rq = cpu_rq(smp_processor_id()); u64 now = sched_clock(); + touch_softlockup_watchdog(); rq->idle_clock += delta_ns; /* * Override the previous timestamp and ignore all @@ -869,177 +680,9 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) rq->prev_clock_raw = now; rq->clock += delta_ns; spin_unlock(&rq->lock); - touch_softlockup_watchdog(); } EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); -static void __resched_task(struct task_struct *p, int tif_bit); - -static inline void resched_task(struct task_struct *p) -{ - __resched_task(p, TIF_NEED_RESCHED); -} - -#ifdef CONFIG_SCHED_HRTICK -/* - * Use HR-timers to deliver accurate preemption points. - * - * Its all a bit involved since we cannot program an hrt while holding the - * rq->lock. So what we do is store a state in in rq->hrtick_* and ask for a - * reschedule event. - * - * When we get rescheduled we reprogram the hrtick_timer outside of the - * rq->lock. - */ -static inline void resched_hrt(struct task_struct *p) -{ - __resched_task(p, TIF_HRTICK_RESCHED); -} - -static inline void resched_rq(struct rq *rq) -{ - unsigned long flags; - - spin_lock_irqsave(&rq->lock, flags); - resched_task(rq->curr); - spin_unlock_irqrestore(&rq->lock, flags); -} - -enum { - HRTICK_SET, /* re-programm hrtick_timer */ - HRTICK_RESET, /* not a new slice */ -}; - -/* - * Use hrtick when: - * - enabled by features - * - hrtimer is actually high res - */ -static inline int hrtick_enabled(struct rq *rq) -{ - if (!sched_feat(HRTICK)) - return 0; - return hrtimer_is_hres_active(&rq->hrtick_timer); -} - -/* - * Called to set the hrtick timer state. - * - * called with rq->lock held and irqs disabled - */ -static void hrtick_start(struct rq *rq, u64 delay, int reset) -{ - assert_spin_locked(&rq->lock); - - /* - * preempt at: now + delay - */ - rq->hrtick_expire = - ktime_add_ns(rq->hrtick_timer.base->get_time(), delay); - /* - * indicate we need to program the timer - */ - __set_bit(HRTICK_SET, &rq->hrtick_flags); - if (reset) - __set_bit(HRTICK_RESET, &rq->hrtick_flags); - - /* - * New slices are called from the schedule path and don't need a - * forced reschedule. - */ - if (reset) - resched_hrt(rq->curr); -} - -static void hrtick_clear(struct rq *rq) -{ - if (hrtimer_active(&rq->hrtick_timer)) - hrtimer_cancel(&rq->hrtick_timer); -} - -/* - * Update the timer from the possible pending state. - */ -static void hrtick_set(struct rq *rq) -{ - ktime_t time; - int set, reset; - unsigned long flags; - - WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); - - spin_lock_irqsave(&rq->lock, flags); - set = __test_and_clear_bit(HRTICK_SET, &rq->hrtick_flags); - reset = __test_and_clear_bit(HRTICK_RESET, &rq->hrtick_flags); - time = rq->hrtick_expire; - clear_thread_flag(TIF_HRTICK_RESCHED); - spin_unlock_irqrestore(&rq->lock, flags); - - if (set) { - hrtimer_start(&rq->hrtick_timer, time, HRTIMER_MODE_ABS); - if (reset && !hrtimer_active(&rq->hrtick_timer)) - resched_rq(rq); - } else - hrtick_clear(rq); -} - -/* - * High-resolution timer tick. - * Runs from hardirq context with interrupts disabled. 
- */ -static enum hrtimer_restart hrtick(struct hrtimer *timer) -{ - struct rq *rq = container_of(timer, struct rq, hrtick_timer); - - WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); - - spin_lock(&rq->lock); - __update_rq_clock(rq); - rq->curr->sched_class->task_tick(rq, rq->curr, 1); - spin_unlock(&rq->lock); - - return HRTIMER_NORESTART; -} - -static inline void init_rq_hrtick(struct rq *rq) -{ - rq->hrtick_flags = 0; - hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - rq->hrtick_timer.function = hrtick; - rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; -} - -void hrtick_resched(void) -{ - struct rq *rq; - unsigned long flags; - - if (!test_thread_flag(TIF_HRTICK_RESCHED)) - return; - - local_irq_save(flags); - rq = cpu_rq(smp_processor_id()); - hrtick_set(rq); - local_irq_restore(flags); -} -#else -static inline void hrtick_clear(struct rq *rq) -{ -} - -static inline void hrtick_set(struct rq *rq) -{ -} - -static inline void init_rq_hrtick(struct rq *rq) -{ -} - -void hrtick_resched(void) -{ -} -#endif - /* * resched_task - mark a task 'to be rescheduled now'. * @@ -1053,16 +696,16 @@ void hrtick_resched(void) #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) #endif -static void __resched_task(struct task_struct *p, int tif_bit) +static void resched_task(struct task_struct *p) { int cpu; assert_spin_locked(&task_rq(p)->lock); - if (unlikely(test_tsk_thread_flag(p, tif_bit))) + if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED))) return; - set_tsk_thread_flag(p, tif_bit); + set_tsk_thread_flag(p, TIF_NEED_RESCHED); cpu = task_cpu(p); if (cpu == smp_processor_id()) @@ -1085,10 +728,10 @@ static void resched_cpu(int cpu) spin_unlock_irqrestore(&rq->lock, flags); } #else -static void __resched_task(struct task_struct *p, int tif_bit) +static inline void resched_task(struct task_struct *p) { assert_spin_locked(&task_rq(p)->lock); - set_tsk_thread_flag(p, tif_bit); + set_tsk_need_resched(p); } #endif @@ -1228,23 +871,6 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime); static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} #endif -static inline void inc_cpu_load(struct rq *rq, unsigned long load) -{ - update_load_add(&rq->load, load); -} - -static inline void dec_cpu_load(struct rq *rq, unsigned long load) -{ - update_load_sub(&rq->load, load); -} - -#ifdef CONFIG_SMP -static unsigned long source_load(int cpu, int type); -static unsigned long target_load(int cpu, int type); -static unsigned long cpu_avg_load_per_task(int cpu); -static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); -#endif /* CONFIG_SMP */ - #include "sched_stats.h" #include "sched_idletask.c" #include "sched_fair.c" @@ -1255,14 +881,41 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); #define sched_class_highest (&rt_sched_class) +/* + * Update delta_exec, delta_fair fields for rq. + * + * delta_fair clock advances at a rate inversely proportional to + * total load (rq->load.weight) on the runqueue, while + * delta_exec advances at the same rate as wall-clock (provided + * cpu is not idle). + * + * delta_exec / delta_fair is a measure of the (smoothened) load on this + * runqueue over any given interval. This (smoothened) load is used + * during load balance. + * + * This function is called /before/ updating rq->load + * and when switching tasks. 
+ */ +static inline void inc_load(struct rq *rq, const struct task_struct *p) +{ + update_load_add(&rq->load, p->se.load.weight); +} + +static inline void dec_load(struct rq *rq, const struct task_struct *p) +{ + update_load_sub(&rq->load, p->se.load.weight); +} + static void inc_nr_running(struct task_struct *p, struct rq *rq) { rq->nr_running++; + inc_load(rq, p); } static void dec_nr_running(struct task_struct *p, struct rq *rq) { rq->nr_running--; + dec_load(rq, p); } static void set_load_weight(struct task_struct *p) @@ -1386,7 +1039,7 @@ unsigned long weighted_cpuload(const int cpu) static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) { - set_task_rq(p, cpu); + set_task_cfs_rq(p, cpu); #ifdef CONFIG_SMP /* * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be @@ -1398,24 +1051,12 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) #endif } -static inline void check_class_changed(struct rq *rq, struct task_struct *p, - const struct sched_class *prev_class, - int oldprio, int running) -{ - if (prev_class != p->sched_class) { - if (prev_class->switched_from) - prev_class->switched_from(rq, p, running); - p->sched_class->switched_to(rq, p, running); - } else - p->sched_class->prio_changed(rq, p, oldprio, running); -} - #ifdef CONFIG_SMP /* * Is this task likely cache-hot: */ -static int +static inline int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd) { s64 delta; @@ -1640,7 +1281,7 @@ static unsigned long target_load(int cpu, int type) /* * Return the average load per task on the cpu's run queue */ -static unsigned long cpu_avg_load_per_task(int cpu) +static inline unsigned long cpu_avg_load_per_task(int cpu) { struct rq *rq = cpu_rq(cpu); unsigned long total = weighted_cpuload(cpu); @@ -1797,6 +1438,58 @@ static int sched_balance_self(int cpu, int flag) #endif /* CONFIG_SMP */ +/* + * wake_idle() will wake a task on an idle cpu if task->cpu is + * not idle and an idle cpu is available. The span of cpus to + * search starts with cpus closest then further out as needed, + * so we always favor a closer, idle cpu. + * + * Returns the CPU we should wake onto. + */ +#if defined(ARCH_HAS_SCHED_WAKE_IDLE) +static int wake_idle(int cpu, struct task_struct *p) +{ + cpumask_t tmp; + struct sched_domain *sd; + int i; + + /* + * If it is idle, then it is the best cpu to run this task. + * + * This cpu is also the best, if it has more than one task already. + * Siblings must be also busy(in most cases) as they didn't already + * pickup the extra load from this cpu and hence we need not check + * sibling runqueue info. This will avoid the checks and cache miss + * penalities associated with that. 
+ */ + if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1) + return cpu; + + for_each_domain(cpu, sd) { + if (sd->flags & SD_WAKE_IDLE) { + cpus_and(tmp, sd->span, p->cpus_allowed); + for_each_cpu_mask(i, tmp) { + if (idle_cpu(i)) { + if (i != task_cpu(p)) { + schedstat_inc(p, + se.nr_wakeups_idle); + } + return i; + } + } + } else { + break; + } + } + return cpu; +} +#else +static inline int wake_idle(int cpu, struct task_struct *p) +{ + return cpu; +} +#endif + /*** * try_to_wake_up - wake up a thread * @p: the to-be-woken-up thread @@ -1817,6 +1510,11 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) unsigned long flags; long old_state; struct rq *rq; +#ifdef CONFIG_SMP + struct sched_domain *sd, *this_sd = NULL; + unsigned long load, this_load; + int new_cpu; +#endif rq = task_rq_lock(p, &flags); old_state = p->state; @@ -1834,9 +1532,92 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) if (unlikely(task_running(rq, p))) goto out_activate; - cpu = p->sched_class->select_task_rq(p, sync); - if (cpu != orig_cpu) { - set_task_cpu(p, cpu); + new_cpu = cpu; + + schedstat_inc(rq, ttwu_count); + if (cpu == this_cpu) { + schedstat_inc(rq, ttwu_local); + goto out_set_cpu; + } + + for_each_domain(this_cpu, sd) { + if (cpu_isset(cpu, sd->span)) { + schedstat_inc(sd, ttwu_wake_remote); + this_sd = sd; + break; + } + } + + if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) + goto out_set_cpu; + + /* + * Check for affine wakeup and passive balancing possibilities. + */ + if (this_sd) { + int idx = this_sd->wake_idx; + unsigned int imbalance; + + imbalance = 100 + (this_sd->imbalance_pct - 100) / 2; + + load = source_load(cpu, idx); + this_load = target_load(this_cpu, idx); + + new_cpu = this_cpu; /* Wake to this CPU if we can */ + + if (this_sd->flags & SD_WAKE_AFFINE) { + unsigned long tl = this_load; + unsigned long tl_per_task; + + /* + * Attract cache-cold tasks on sync wakeups: + */ + if (sync && !task_hot(p, rq->clock, this_sd)) + goto out_set_cpu; + + schedstat_inc(p, se.nr_wakeups_affine_attempts); + tl_per_task = cpu_avg_load_per_task(this_cpu); + + /* + * If sync wakeup then subtract the (maximum possible) + * effect of the currently running task from the load + * of the current CPU: + */ + if (sync) + tl -= current->se.load.weight; + + if ((tl <= load && + tl + target_load(cpu, idx) <= tl_per_task) || + 100*(tl + p->se.load.weight) <= imbalance*load) { + /* + * This domain has SD_WAKE_AFFINE and + * p is cache cold in this domain, and + * there is no bad imbalance. + */ + schedstat_inc(this_sd, ttwu_move_affine); + schedstat_inc(p, se.nr_wakeups_affine); + goto out_set_cpu; + } + } + + /* + * Start passive balancing when half the imbalance_pct + * limit is reached. + */ + if (this_sd->flags & SD_WAKE_BALANCE) { + if (imbalance*this_load <= 100*load) { + schedstat_inc(this_sd, ttwu_move_balance); + schedstat_inc(p, se.nr_wakeups_passive); + goto out_set_cpu; + } + } + } + + new_cpu = cpu; /* Could not wake to this_cpu. 
Wake to cpu instead */ +out_set_cpu: + new_cpu = wake_idle(new_cpu, p); + if (new_cpu != cpu) { + set_task_cpu(p, new_cpu); task_rq_unlock(rq, &flags); /* might preempt at this point */ rq = task_rq_lock(p, &flags); @@ -1850,21 +1631,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) cpu = task_cpu(p); } -#ifdef CONFIG_SCHEDSTATS - schedstat_inc(rq, ttwu_count); - if (cpu == this_cpu) - schedstat_inc(rq, ttwu_local); - else { - struct sched_domain *sd; - for_each_domain(this_cpu, sd) { - if (cpu_isset(cpu, sd->span)) { - schedstat_inc(sd, ttwu_wake_remote); - break; - } - } - } -#endif - out_activate: #endif /* CONFIG_SMP */ schedstat_inc(p, se.nr_wakeups); @@ -1883,10 +1649,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) out_running: p->state = TASK_RUNNING; -#ifdef CONFIG_SMP - if (p->sched_class->task_wake_up) - p->sched_class->task_wake_up(rq, p); -#endif out: task_rq_unlock(rq, &flags); @@ -1929,7 +1691,7 @@ static void __sched_fork(struct task_struct *p) p->se.wait_max = 0; #endif - INIT_LIST_HEAD(&p->rt.run_list); + INIT_LIST_HEAD(&p->run_list); p->se.on_rq = 0; #ifdef CONFIG_PREEMPT_NOTIFIERS @@ -2009,10 +1771,6 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) inc_nr_running(p, rq); } check_preempt_curr(rq, p); -#ifdef CONFIG_SMP - if (p->sched_class->task_wake_up) - p->sched_class->task_wake_up(rq, p); -#endif task_rq_unlock(rq, &flags); } @@ -2133,11 +1891,6 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) prev_state = prev->state; finish_arch_switch(prev); finish_lock_switch(rq, prev); -#ifdef CONFIG_SMP - if (current->sched_class->post_schedule) - current->sched_class->post_schedule(rq); -#endif - fire_sched_in_preempt_notifiers(current); if (mm) mmdrop(mm); @@ -2371,13 +2124,11 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2) /* * double_lock_balance - lock the busiest runqueue, this_rq is locked already. */ -static int double_lock_balance(struct rq *this_rq, struct rq *busiest) +static void double_lock_balance(struct rq *this_rq, struct rq *busiest) __releases(this_rq->lock) __acquires(busiest->lock) __acquires(this_rq->lock) { - int ret = 0; - if (unlikely(!irqs_disabled())) { /* printk() doesn't work good under rq->lock */ spin_unlock(&this_rq->lock); @@ -2388,11 +2139,9 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest) spin_unlock(&this_rq->lock); spin_lock(&busiest->lock); spin_lock(&this_rq->lock); - ret = 1; } else spin_lock(&busiest->lock); } - return ret; } /* @@ -3736,14 +3485,12 @@ void scheduler_tick(void) /* * Let rq->clock advance by at least TICK_NSEC: */ - if (unlikely(rq->clock < next_tick)) { + if (unlikely(rq->clock < next_tick)) rq->clock = next_tick; - rq->clock_underflows++; - } rq->tick_timestamp = rq->clock; update_cpu_load(rq); - curr->sched_class->task_tick(rq, curr, 0); - update_sched_rt_period(rq); + if (curr != rq->idle) /* FIXME: needed? 
*/ + curr->sched_class->task_tick(rq, curr); spin_unlock(&rq->lock); #ifdef CONFIG_SMP @@ -3889,8 +3636,6 @@ asmlinkage void __sched schedule(void) schedule_debug(prev); - hrtick_clear(rq); - /* * Do the rq-clock update outside the rq lock: */ @@ -3909,11 +3654,6 @@ asmlinkage void __sched schedule(void) switch_count = &prev->nvcsw; } -#ifdef CONFIG_SMP - if (prev->sched_class->pre_schedule) - prev->sched_class->pre_schedule(rq, prev); -#endif - if (unlikely(!rq->nr_running)) idle_balance(cpu, rq); @@ -3928,20 +3668,14 @@ asmlinkage void __sched schedule(void) ++*switch_count; context_switch(rq, prev, next); /* unlocks the rq */ - /* - * the context switch might have flipped the stack from under - * us, hence refresh the local variables. - */ - cpu = smp_processor_id(); - rq = cpu_rq(cpu); } else spin_unlock_irq(&rq->lock); - hrtick_set(rq); - - if (unlikely(reacquire_kernel_lock(current) < 0)) + if (unlikely(reacquire_kernel_lock(current) < 0)) { + cpu = smp_processor_id(); + rq = cpu_rq(cpu); goto need_resched_nonpreemptible; - + } preempt_enable_no_resched(); if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) goto need_resched; @@ -3957,9 +3691,10 @@ EXPORT_SYMBOL(schedule); asmlinkage void __sched preempt_schedule(void) { struct thread_info *ti = current_thread_info(); +#ifdef CONFIG_PREEMPT_BKL struct task_struct *task = current; int saved_lock_depth; - +#endif /* * If there is a non-zero preempt_count or interrupts are disabled, * we do not want to preempt the current task. Just return.. @@ -3975,10 +3710,14 @@ asmlinkage void __sched preempt_schedule(void) * clear ->lock_depth so that schedule() doesnt * auto-release the semaphore: */ +#ifdef CONFIG_PREEMPT_BKL saved_lock_depth = task->lock_depth; task->lock_depth = -1; +#endif schedule(); +#ifdef CONFIG_PREEMPT_BKL task->lock_depth = saved_lock_depth; +#endif sub_preempt_count(PREEMPT_ACTIVE); /* @@ -3999,9 +3738,10 @@ EXPORT_SYMBOL(preempt_schedule); asmlinkage void __sched preempt_schedule_irq(void) { struct thread_info *ti = current_thread_info(); +#ifdef CONFIG_PREEMPT_BKL struct task_struct *task = current; int saved_lock_depth; - +#endif /* Catch callers which need to be fixed */ BUG_ON(ti->preempt_count || !irqs_disabled()); @@ -4013,12 +3753,16 @@ asmlinkage void __sched preempt_schedule_irq(void) * clear ->lock_depth so that schedule() doesnt * auto-release the semaphore: */ +#ifdef CONFIG_PREEMPT_BKL saved_lock_depth = task->lock_depth; task->lock_depth = -1; +#endif local_irq_enable(); schedule(); local_irq_disable(); +#ifdef CONFIG_PREEMPT_BKL task->lock_depth = saved_lock_depth; +#endif sub_preempt_count(PREEMPT_ACTIVE); /* @@ -4275,7 +4019,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio) unsigned long flags; int oldprio, on_rq, running; struct rq *rq; - const struct sched_class *prev_class = p->sched_class; BUG_ON(prio < 0 || prio > MAX_PRIO); @@ -4301,10 +4044,18 @@ void rt_mutex_setprio(struct task_struct *p, int prio) if (on_rq) { if (running) p->sched_class->set_curr_task(rq); - enqueue_task(rq, p, 0); - - check_class_changed(rq, p, prev_class, oldprio, running); + /* + * Reschedule if we are currently running on this runqueue and + * our priority decreased, or if we are not currently running on + * this runqueue and our priority is higher than the current's + */ + if (running) { + if (p->prio > oldprio) + resched_task(rq->curr); + } else { + check_preempt_curr(rq, p); + } } task_rq_unlock(rq, &flags); } @@ -4336,8 +4087,10 @@ void set_user_nice(struct task_struct *p, long nice) goto out_unlock; } on_rq 
= p->se.on_rq; - if (on_rq) + if (on_rq) { dequeue_task(rq, p, 0); + dec_load(rq, p); + } p->static_prio = NICE_TO_PRIO(nice); set_load_weight(p); @@ -4347,6 +4100,7 @@ void set_user_nice(struct task_struct *p, long nice) if (on_rq) { enqueue_task(rq, p, 0); + inc_load(rq, p); /* * If the task increased its priority or is running and * lowered its priority, then reschedule its CPU: @@ -4504,7 +4258,6 @@ int sched_setscheduler(struct task_struct *p, int policy, { int retval, oldprio, oldpolicy = -1, on_rq, running; unsigned long flags; - const struct sched_class *prev_class = p->sched_class; struct rq *rq; /* may grab non-irq protected spin_locks */ @@ -4598,10 +4351,18 @@ int sched_setscheduler(struct task_struct *p, int policy, if (on_rq) { if (running) p->sched_class->set_curr_task(rq); - activate_task(rq, p, 0); - - check_class_changed(rq, p, prev_class, oldprio, running); + /* + * Reschedule if we are currently running on this runqueue and + * our priority decreased, or if we are not currently running on + * this runqueue and our priority is higher than the current's + */ + if (running) { + if (p->prio > oldprio) + resched_task(rq->curr); + } else { + check_preempt_curr(rq, p); + } } __task_rq_unlock(rq); spin_unlock_irqrestore(&p->pi_lock, flags); @@ -4729,13 +4490,13 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask) struct task_struct *p; int retval; - get_online_cpus(); + mutex_lock(&sched_hotcpu_mutex); read_lock(&tasklist_lock); p = find_process_by_pid(pid); if (!p) { read_unlock(&tasklist_lock); - put_online_cpus(); + mutex_unlock(&sched_hotcpu_mutex); return -ESRCH; } @@ -4775,7 +4536,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask) } out_unlock: put_task_struct(p); - put_online_cpus(); + mutex_unlock(&sched_hotcpu_mutex); return retval; } @@ -4832,7 +4593,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask) struct task_struct *p; int retval; - get_online_cpus(); + mutex_lock(&sched_hotcpu_mutex); read_lock(&tasklist_lock); retval = -ESRCH; @@ -4848,7 +4609,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask) out_unlock: read_unlock(&tasklist_lock); - put_online_cpus(); + mutex_unlock(&sched_hotcpu_mutex); return retval; } @@ -4922,8 +4683,7 @@ static void __cond_resched(void) } while (need_resched()); } -#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_VOLUNTARY) -int __sched _cond_resched(void) +int __sched cond_resched(void) { if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) && system_state == SYSTEM_RUNNING) { @@ -4932,8 +4692,7 @@ int __sched _cond_resched(void) } return 0; } -EXPORT_SYMBOL(_cond_resched); -#endif +EXPORT_SYMBOL(cond_resched); /* * cond_resched_lock() - if a reschedule is pending, drop the given lock, @@ -5131,7 +4890,7 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) static const char stat_nam[] = "RSDTtZX"; -void sched_show_task(struct task_struct *p) +static void show_task(struct task_struct *p) { unsigned long free = 0; unsigned state; @@ -5161,7 +4920,8 @@ void sched_show_task(struct task_struct *p) printk(KERN_CONT "%5lu %5d %6d\n", free, task_pid_nr(p), task_pid_nr(p->real_parent)); - show_stack(p, NULL); + if (state != TASK_RUNNING) + show_stack(p, NULL); } void show_state_filter(unsigned long state_filter) @@ -5183,7 +4943,7 @@ void show_state_filter(unsigned long state_filter) */ touch_nmi_watchdog(); if (!state_filter || (p->state & state_filter)) - sched_show_task(p); + show_task(p); } while_each_thread(g, p); touch_all_softlockup_watchdogs(); @@ -5232,8 +4992,11 @@ void 
__cpuinit init_idle(struct task_struct *idle, int cpu) spin_unlock_irqrestore(&rq->lock, flags); /* Set the preempt count _outside_ the spinlocks! */ +#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL) + task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0); +#else task_thread_info(idle)->preempt_count = 0; - +#endif /* * The idle tasks have their own, simple scheduling class: */ @@ -5314,13 +5077,7 @@ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) goto out; } - if (p->sched_class->set_cpus_allowed) - p->sched_class->set_cpus_allowed(p, &new_mask); - else { - p->cpus_allowed = new_mask; - p->rt.nr_cpus_allowed = cpus_weight(new_mask); - } - + p->cpus_allowed = new_mask; /* Can the task run on the task's current CPU? If so, we're done */ if (cpu_isset(task_cpu(p), new_mask)) goto out; @@ -5812,6 +5569,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) struct rq *rq; switch (action) { + case CPU_LOCK_ACQUIRE: + mutex_lock(&sched_hotcpu_mutex); + break; case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: @@ -5830,15 +5590,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_ONLINE_FROZEN: /* Strictly unnecessary, as first user will wake it. */ wake_up_process(cpu_rq(cpu)->migration_thread); - - /* Update our root-domain */ - rq = cpu_rq(cpu); - spin_lock_irqsave(&rq->lock, flags); - if (rq->rd) { - BUG_ON(!cpu_isset(cpu, rq->rd->span)); - cpu_set(cpu, rq->rd->online); - } - spin_unlock_irqrestore(&rq->lock, flags); break; #ifdef CONFIG_HOTPLUG_CPU @@ -5889,18 +5640,10 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) } spin_unlock_irq(&rq->lock); break; - - case CPU_DOWN_PREPARE: - /* Update our root-domain */ - rq = cpu_rq(cpu); - spin_lock_irqsave(&rq->lock, flags); - if (rq->rd) { - BUG_ON(!cpu_isset(cpu, rq->rd->span)); - cpu_clear(cpu, rq->rd->online); - } - spin_unlock_irqrestore(&rq->lock, flags); - break; #endif + case CPU_LOCK_RELEASE: + mutex_unlock(&sched_hotcpu_mutex); + break; } return NOTIFY_OK; } @@ -6088,76 +5831,11 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) return 1; } -static void rq_attach_root(struct rq *rq, struct root_domain *rd) -{ - unsigned long flags; - const struct sched_class *class; - - spin_lock_irqsave(&rq->lock, flags); - - if (rq->rd) { - struct root_domain *old_rd = rq->rd; - - for (class = sched_class_highest; class; class = class->next) { - if (class->leave_domain) - class->leave_domain(rq); - } - - cpu_clear(rq->cpu, old_rd->span); - cpu_clear(rq->cpu, old_rd->online); - - if (atomic_dec_and_test(&old_rd->refcount)) - kfree(old_rd); - } - - atomic_inc(&rd->refcount); - rq->rd = rd; - - cpu_set(rq->cpu, rd->span); - if (cpu_isset(rq->cpu, cpu_online_map)) - cpu_set(rq->cpu, rd->online); - - for (class = sched_class_highest; class; class = class->next) { - if (class->join_domain) - class->join_domain(rq); - } - - spin_unlock_irqrestore(&rq->lock, flags); -} - -static void init_rootdomain(struct root_domain *rd) -{ - memset(rd, 0, sizeof(*rd)); - - cpus_clear(rd->span); - cpus_clear(rd->online); -} - -static void init_defrootdomain(void) -{ - init_rootdomain(&def_root_domain); - atomic_set(&def_root_domain.refcount, 1); -} - -static struct root_domain *alloc_rootdomain(void) -{ - struct root_domain *rd; - - rd = kmalloc(sizeof(*rd), GFP_KERNEL); - if (!rd) - return NULL; - - init_rootdomain(rd); - - return rd; -} - /* - * Attach the domain 'sd' to 'cpu' as its base domain. 
Callers must + * Attach the domain 'sd' to 'cpu' as its base domain. Callers must * hold the hotplug lock. */ -static void -cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) +static void cpu_attach_domain(struct sched_domain *sd, int cpu) { struct rq *rq = cpu_rq(cpu); struct sched_domain *tmp; @@ -6182,7 +5860,6 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) sched_domain_debug(sd, cpu); - rq_attach_root(rq, rd); rcu_assign_pointer(rq->sd, sd); } @@ -6551,7 +6228,6 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) static int build_sched_domains(const cpumask_t *cpu_map) { int i; - struct root_domain *rd; #ifdef CONFIG_NUMA struct sched_group **sched_group_nodes = NULL; int sd_allnodes = 0; @@ -6568,12 +6244,6 @@ static int build_sched_domains(const cpumask_t *cpu_map) sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; #endif - rd = alloc_rootdomain(); - if (!rd) { - printk(KERN_WARNING "Cannot alloc root domain\n"); - return -ENOMEM; - } - /* * Set up domains for cpus specified by the cpu_map. */ @@ -6790,7 +6460,7 @@ static int build_sched_domains(const cpumask_t *cpu_map) #else sd = &per_cpu(phys_domains, i); #endif - cpu_attach_domain(sd, rd, i); + cpu_attach_domain(sd, i); } return 0; @@ -6848,7 +6518,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map) unregister_sched_domain_sysctl(); for_each_cpu_mask(i, *cpu_map) - cpu_attach_domain(NULL, &def_root_domain, i); + cpu_attach_domain(NULL, i); synchronize_sched(); arch_destroy_sched_domains(cpu_map); } @@ -6878,8 +6548,6 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new) { int i, j; - lock_doms_cur(); - /* always unregister in case we don't destroy any domains */ unregister_sched_domain_sysctl(); @@ -6920,8 +6588,6 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new) ndoms_cur = ndoms_new; register_sched_domain_sysctl(); - - unlock_doms_cur(); } #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) @@ -6929,10 +6595,10 @@ static int arch_reinit_sched_domains(void) { int err; - get_online_cpus(); + mutex_lock(&sched_hotcpu_mutex); detach_destroy_domains(&cpu_online_map); err = arch_init_sched_domains(&cpu_online_map); - put_online_cpus(); + mutex_unlock(&sched_hotcpu_mutex); return err; } @@ -7043,12 +6709,12 @@ void __init sched_init_smp(void) { cpumask_t non_isolated_cpus; - get_online_cpus(); + mutex_lock(&sched_hotcpu_mutex); arch_init_sched_domains(&cpu_online_map); cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map); if (cpus_empty(non_isolated_cpus)) cpu_set(smp_processor_id(), non_isolated_cpus); - put_online_cpus(); + mutex_unlock(&sched_hotcpu_mutex); /* XXX: Theoretical race here - CPU may be hotplugged now */ hotcpu_notifier(update_sched_domains, 0); @@ -7056,21 +6722,6 @@ void __init sched_init_smp(void) if (set_cpus_allowed(current, non_isolated_cpus) < 0) BUG(); sched_init_granularity(); - -#ifdef CONFIG_FAIR_GROUP_SCHED - if (nr_cpu_ids == 1) - return; - - lb_monitor_task = kthread_create(load_balance_monitor, NULL, - "group_balance"); - if (!IS_ERR(lb_monitor_task)) { - lb_monitor_task->flags |= PF_NOFREEZE; - wake_up_process(lb_monitor_task); - } else { - printk(KERN_ERR "Could not create load balance monitor thread" - "(error = %ld) \n", PTR_ERR(lb_monitor_task)); - } -#endif } #else void __init sched_init_smp(void) @@ -7095,87 +6746,13 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq) cfs_rq->min_vruntime = (u64)(-(1LL << 20)); } -static void 
init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) -{ - struct rt_prio_array *array; - int i; - - array = &rt_rq->active; - for (i = 0; i < MAX_RT_PRIO; i++) { - INIT_LIST_HEAD(array->queue + i); - __clear_bit(i, array->bitmap); - } - /* delimiter for bitsearch: */ - __set_bit(MAX_RT_PRIO, array->bitmap); - -#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED - rt_rq->highest_prio = MAX_RT_PRIO; -#endif -#ifdef CONFIG_SMP - rt_rq->rt_nr_migratory = 0; - rt_rq->overloaded = 0; -#endif - - rt_rq->rt_time = 0; - rt_rq->rt_throttled = 0; - -#ifdef CONFIG_FAIR_GROUP_SCHED - rt_rq->rq = rq; -#endif -} - -#ifdef CONFIG_FAIR_GROUP_SCHED -static void init_tg_cfs_entry(struct rq *rq, struct task_group *tg, - struct cfs_rq *cfs_rq, struct sched_entity *se, - int cpu, int add) -{ - tg->cfs_rq[cpu] = cfs_rq; - init_cfs_rq(cfs_rq, rq); - cfs_rq->tg = tg; - if (add) - list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list); - - tg->se[cpu] = se; - se->cfs_rq = &rq->cfs; - se->my_q = cfs_rq; - se->load.weight = tg->shares; - se->load.inv_weight = div64_64(1ULL<<32, se->load.weight); - se->parent = NULL; -} - -static void init_tg_rt_entry(struct rq *rq, struct task_group *tg, - struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, - int cpu, int add) -{ - tg->rt_rq[cpu] = rt_rq; - init_rt_rq(rt_rq, rq); - rt_rq->tg = tg; - rt_rq->rt_se = rt_se; - if (add) - list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list); - - tg->rt_se[cpu] = rt_se; - rt_se->rt_rq = &rq->rt; - rt_se->my_q = rt_rq; - rt_se->parent = NULL; - INIT_LIST_HEAD(&rt_se->run_list); -} -#endif - void __init sched_init(void) { int highest_cpu = 0; int i, j; -#ifdef CONFIG_SMP - init_defrootdomain(); -#endif - -#ifdef CONFIG_FAIR_GROUP_SCHED - list_add(&init_task_group.list, &task_groups); -#endif - for_each_possible_cpu(i) { + struct rt_prio_array *array; struct rq *rq; rq = cpu_rq(i); @@ -7184,39 +6761,52 @@ void __init sched_init(void) rq->nr_running = 0; rq->clock = 1; init_cfs_rq(&rq->cfs, rq); - init_rt_rq(&rq->rt, rq); #ifdef CONFIG_FAIR_GROUP_SCHED - init_task_group.shares = init_task_group_load; INIT_LIST_HEAD(&rq->leaf_cfs_rq_list); - init_tg_cfs_entry(rq, &init_task_group, - &per_cpu(init_cfs_rq, i), - &per_cpu(init_sched_entity, i), i, 1); - - init_task_group.rt_ratio = sysctl_sched_rt_ratio; /* XXX */ - INIT_LIST_HEAD(&rq->leaf_rt_rq_list); - init_tg_rt_entry(rq, &init_task_group, - &per_cpu(init_rt_rq, i), - &per_cpu(init_sched_rt_entity, i), i, 1); + { + struct cfs_rq *cfs_rq = &per_cpu(init_cfs_rq, i); + struct sched_entity *se = + &per_cpu(init_sched_entity, i); + + init_cfs_rq_p[i] = cfs_rq; + init_cfs_rq(cfs_rq, rq); + cfs_rq->tg = &init_task_group; + list_add(&cfs_rq->leaf_cfs_rq_list, + &rq->leaf_cfs_rq_list); + + init_sched_entity_p[i] = se; + se->cfs_rq = &rq->cfs; + se->my_q = cfs_rq; + se->load.weight = init_task_group_load; + se->load.inv_weight = + div64_64(1ULL<<32, init_task_group_load); + se->parent = NULL; + } + init_task_group.shares = init_task_group_load; + spin_lock_init(&init_task_group.lock); #endif - rq->rt_period_expire = 0; - rq->rt_throttled = 0; for (j = 0; j < CPU_LOAD_IDX_MAX; j++) rq->cpu_load[j] = 0; #ifdef CONFIG_SMP rq->sd = NULL; - rq->rd = NULL; rq->active_balance = 0; rq->next_balance = jiffies; rq->push_cpu = 0; rq->cpu = i; rq->migration_thread = NULL; INIT_LIST_HEAD(&rq->migration_queue); - rq_attach_root(rq, &def_root_domain); #endif - init_rq_hrtick(rq); atomic_set(&rq->nr_iowait, 0); + + array = &rq->rt.active; + for (j = 0; j < MAX_RT_PRIO; j++) { + INIT_LIST_HEAD(array->queue + j); 
+ __clear_bit(j, array->bitmap); + } highest_cpu = i; + /* delimiter for bitsearch: */ + __set_bit(MAX_RT_PRIO, array->bitmap); } set_load_weight(&init_task); @@ -7385,187 +6975,12 @@ void set_curr_task(int cpu, struct task_struct *p) #ifdef CONFIG_FAIR_GROUP_SCHED -#ifdef CONFIG_SMP -/* - * distribute shares of all task groups among their schedulable entities, - * to reflect load distribution across cpus. - */ -static int rebalance_shares(struct sched_domain *sd, int this_cpu) -{ - struct cfs_rq *cfs_rq; - struct rq *rq = cpu_rq(this_cpu); - cpumask_t sdspan = sd->span; - int balanced = 1; - - /* Walk thr' all the task groups that we have */ - for_each_leaf_cfs_rq(rq, cfs_rq) { - int i; - unsigned long total_load = 0, total_shares; - struct task_group *tg = cfs_rq->tg; - - /* Gather total task load of this group across cpus */ - for_each_cpu_mask(i, sdspan) - total_load += tg->cfs_rq[i]->load.weight; - - /* Nothing to do if this group has no load */ - if (!total_load) - continue; - - /* - * tg->shares represents the number of cpu shares the task group - * is eligible to hold on a single cpu. On N cpus, it is - * eligible to hold (N * tg->shares) number of cpu shares. - */ - total_shares = tg->shares * cpus_weight(sdspan); - - /* - * redistribute total_shares across cpus as per the task load - * distribution. - */ - for_each_cpu_mask(i, sdspan) { - unsigned long local_load, local_shares; - - local_load = tg->cfs_rq[i]->load.weight; - local_shares = (local_load * total_shares) / total_load; - if (!local_shares) - local_shares = MIN_GROUP_SHARES; - if (local_shares == tg->se[i]->load.weight) - continue; - - spin_lock_irq(&cpu_rq(i)->lock); - set_se_shares(tg->se[i], local_shares); - spin_unlock_irq(&cpu_rq(i)->lock); - balanced = 0; - } - } - - return balanced; -} - -/* - * How frequently should we rebalance_shares() across cpus? - * - * The more frequently we rebalance shares, the more accurate is the fairness - * of cpu bandwidth distribution between task groups. However higher frequency - * also implies increased scheduling overhead. - * - * sysctl_sched_min_bal_int_shares represents the minimum interval between - * consecutive calls to rebalance_shares() in the same sched domain. - * - * sysctl_sched_max_bal_int_shares represents the maximum interval between - * consecutive calls to rebalance_shares() in the same sched domain. - * - * These settings allows for the appropriate trade-off between accuracy of - * fairness and the associated overhead. - * - */ - -/* default: 8ms, units: milliseconds */ -const_debug unsigned int sysctl_sched_min_bal_int_shares = 8; - -/* default: 128ms, units: milliseconds */ -const_debug unsigned int sysctl_sched_max_bal_int_shares = 128; - -/* kernel thread that runs rebalance_shares() periodically */ -static int load_balance_monitor(void *unused) -{ - unsigned int timeout = sysctl_sched_min_bal_int_shares; - struct sched_param schedparm; - int ret; - - /* - * We don't want this thread's execution to be limited by the shares - * assigned to default group (init_task_group). Hence make it run - * as a SCHED_RR RT task at the lowest priority. 
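The rebalance_shares() logic deleted above is, at its core, a proportional split: a group holding tg->shares per CPU is treated as owning shares * ncpus across the domain, and that total is redistributed according to where the group's load actually sits. A standalone sketch of just that arithmetic follows; local_share() is an illustrative name, and the value 2 used for MIN_GROUP_SHARES is an assumption, since the constant's definition is not part of this patch.

#include <stdio.h>

#define MIN_GROUP_SHARES 2UL    /* placeholder value; the kernel constant is defined elsewhere */

static unsigned long local_share(unsigned long group_shares, int ncpus,
                                 unsigned long local_load, unsigned long total_load)
{
        /* the group may hold group_shares on each of ncpus CPUs */
        unsigned long total_shares = group_shares * ncpus;
        unsigned long shares;

        if (!total_load)
                return MIN_GROUP_SHARES;

        /* redistribute in proportion to this CPU's part of the group load */
        shares = (local_load * total_shares) / total_load;
        return shares ? shares : MIN_GROUP_SHARES;
}

int main(void)
{
        /* a 1024-share group on 4 CPUs, with 3/4 of its load on one CPU */
        printf("%lu\n", local_share(1024, 4, 3072, 4096));   /* -> 3072 */
        printf("%lu\n", local_share(1024, 4,  256, 4096));   /* ->  256 */
        return 0;
}

The removed monitor thread then backs off between sysctl_sched_min_bal_int_shares and sysctl_sched_max_bal_int_shares, doubling its sleep interval whenever a pass finds every group already balanced.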
- */ - schedparm.sched_priority = 1; - ret = sched_setscheduler(current, SCHED_RR, &schedparm); - if (ret) - printk(KERN_ERR "Couldn't set SCHED_RR policy for load balance" - " monitor thread (error = %d) \n", ret); - - while (!kthread_should_stop()) { - int i, cpu, balanced = 1; - - /* Prevent cpus going down or coming up */ - get_online_cpus(); - /* lockout changes to doms_cur[] array */ - lock_doms_cur(); - /* - * Enter a rcu read-side critical section to safely walk rq->sd - * chain on various cpus and to walk task group list - * (rq->leaf_cfs_rq_list) in rebalance_shares(). - */ - rcu_read_lock(); - - for (i = 0; i < ndoms_cur; i++) { - cpumask_t cpumap = doms_cur[i]; - struct sched_domain *sd = NULL, *sd_prev = NULL; - - cpu = first_cpu(cpumap); - - /* Find the highest domain at which to balance shares */ - for_each_domain(cpu, sd) { - if (!(sd->flags & SD_LOAD_BALANCE)) - continue; - sd_prev = sd; - } - - sd = sd_prev; - /* sd == NULL? No load balance reqd in this domain */ - if (!sd) - continue; - - balanced &= rebalance_shares(sd, cpu); - } - - rcu_read_unlock(); - - unlock_doms_cur(); - put_online_cpus(); - - if (!balanced) - timeout = sysctl_sched_min_bal_int_shares; - else if (timeout < sysctl_sched_max_bal_int_shares) - timeout *= 2; - - msleep_interruptible(timeout); - } - - return 0; -} -#endif /* CONFIG_SMP */ - -static void free_sched_group(struct task_group *tg) -{ - int i; - - for_each_possible_cpu(i) { - if (tg->cfs_rq) - kfree(tg->cfs_rq[i]); - if (tg->se) - kfree(tg->se[i]); - if (tg->rt_rq) - kfree(tg->rt_rq[i]); - if (tg->rt_se) - kfree(tg->rt_se[i]); - } - - kfree(tg->cfs_rq); - kfree(tg->se); - kfree(tg->rt_rq); - kfree(tg->rt_se); - kfree(tg); -} - /* allocate runqueue etc for a new task group */ struct task_group *sched_create_group(void) { struct task_group *tg; struct cfs_rq *cfs_rq; struct sched_entity *se; - struct rt_rq *rt_rq; - struct sched_rt_entity *rt_se; struct rq *rq; int i; @@ -7579,89 +6994,97 @@ struct task_group *sched_create_group(void) tg->se = kzalloc(sizeof(se) * NR_CPUS, GFP_KERNEL); if (!tg->se) goto err; - tg->rt_rq = kzalloc(sizeof(rt_rq) * NR_CPUS, GFP_KERNEL); - if (!tg->rt_rq) - goto err; - tg->rt_se = kzalloc(sizeof(rt_se) * NR_CPUS, GFP_KERNEL); - if (!tg->rt_se) - goto err; - - tg->shares = NICE_0_LOAD; - tg->rt_ratio = 0; /* XXX */ for_each_possible_cpu(i) { rq = cpu_rq(i); - cfs_rq = kmalloc_node(sizeof(struct cfs_rq), - GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); + cfs_rq = kmalloc_node(sizeof(struct cfs_rq), GFP_KERNEL, + cpu_to_node(i)); if (!cfs_rq) goto err; - se = kmalloc_node(sizeof(struct sched_entity), - GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); + se = kmalloc_node(sizeof(struct sched_entity), GFP_KERNEL, + cpu_to_node(i)); if (!se) goto err; - rt_rq = kmalloc_node(sizeof(struct rt_rq), - GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); - if (!rt_rq) - goto err; + memset(cfs_rq, 0, sizeof(struct cfs_rq)); + memset(se, 0, sizeof(struct sched_entity)); - rt_se = kmalloc_node(sizeof(struct sched_rt_entity), - GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); - if (!rt_se) - goto err; + tg->cfs_rq[i] = cfs_rq; + init_cfs_rq(cfs_rq, rq); + cfs_rq->tg = tg; - init_tg_cfs_entry(rq, tg, cfs_rq, se, i, 0); - init_tg_rt_entry(rq, tg, rt_rq, rt_se, i, 0); + tg->se[i] = se; + se->cfs_rq = &rq->cfs; + se->my_q = cfs_rq; + se->load.weight = NICE_0_LOAD; + se->load.inv_weight = div64_64(1ULL<<32, NICE_0_LOAD); + se->parent = NULL; } - lock_task_group_list(); for_each_possible_cpu(i) { rq = cpu_rq(i); cfs_rq = tg->cfs_rq[i]; 
list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list); - rt_rq = tg->rt_rq[i]; - list_add_rcu(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list); } - list_add_rcu(&tg->list, &task_groups); - unlock_task_group_list(); + + tg->shares = NICE_0_LOAD; + spin_lock_init(&tg->lock); return tg; err: - free_sched_group(tg); + for_each_possible_cpu(i) { + if (tg->cfs_rq) + kfree(tg->cfs_rq[i]); + if (tg->se) + kfree(tg->se[i]); + } + kfree(tg->cfs_rq); + kfree(tg->se); + kfree(tg); + return ERR_PTR(-ENOMEM); } /* rcu callback to free various structures associated with a task group */ -static void free_sched_group_rcu(struct rcu_head *rhp) +static void free_sched_group(struct rcu_head *rhp) { + struct task_group *tg = container_of(rhp, struct task_group, rcu); + struct cfs_rq *cfs_rq; + struct sched_entity *se; + int i; + /* now it should be safe to free those cfs_rqs */ - free_sched_group(container_of(rhp, struct task_group, rcu)); + for_each_possible_cpu(i) { + cfs_rq = tg->cfs_rq[i]; + kfree(cfs_rq); + + se = tg->se[i]; + kfree(se); + } + + kfree(tg->cfs_rq); + kfree(tg->se); + kfree(tg); } /* Destroy runqueue etc associated with a task group */ void sched_destroy_group(struct task_group *tg) { struct cfs_rq *cfs_rq = NULL; - struct rt_rq *rt_rq = NULL; int i; - lock_task_group_list(); for_each_possible_cpu(i) { cfs_rq = tg->cfs_rq[i]; list_del_rcu(&cfs_rq->leaf_cfs_rq_list); - rt_rq = tg->rt_rq[i]; - list_del_rcu(&rt_rq->leaf_rt_rq_list); } - list_del_rcu(&tg->list); - unlock_task_group_list(); BUG_ON(!cfs_rq); /* wait for possible concurrent references to cfs_rqs complete */ - call_rcu(&tg->rcu, free_sched_group_rcu); + call_rcu(&tg->rcu, free_sched_group); } /* change task's runqueue when it moves between groups. @@ -7677,6 +7100,11 @@ void sched_move_task(struct task_struct *tsk) rq = task_rq_lock(tsk, &flags); + if (tsk->sched_class != &fair_sched_class) { + set_task_cfs_rq(tsk, task_cpu(tsk)); + goto done; + } + update_rq_clock(rq); running = task_current(rq, tsk); @@ -7688,7 +7116,7 @@ void sched_move_task(struct task_struct *tsk) tsk->sched_class->put_prev_task(rq, tsk); } - set_task_rq(tsk, task_cpu(tsk)); + set_task_cfs_rq(tsk, task_cpu(tsk)); if (on_rq) { if (unlikely(running)) @@ -7696,82 +7124,53 @@ void sched_move_task(struct task_struct *tsk) enqueue_task(rq, tsk, 0); } +done: task_rq_unlock(rq, &flags); } -/* rq->lock to be locked by caller */ static void set_se_shares(struct sched_entity *se, unsigned long shares) { struct cfs_rq *cfs_rq = se->cfs_rq; struct rq *rq = cfs_rq->rq; int on_rq; - if (!shares) - shares = MIN_GROUP_SHARES; + spin_lock_irq(&rq->lock); on_rq = se->on_rq; - if (on_rq) { + if (on_rq) dequeue_entity(cfs_rq, se, 0); - dec_cpu_load(rq, se->load.weight); - } se->load.weight = shares; se->load.inv_weight = div64_64((1ULL<<32), shares); - if (on_rq) { + if (on_rq) enqueue_entity(cfs_rq, se, 0); - inc_cpu_load(rq, se->load.weight); - } + + spin_unlock_irq(&rq->lock); } int sched_group_set_shares(struct task_group *tg, unsigned long shares) { int i; - struct cfs_rq *cfs_rq; - struct rq *rq; - - lock_task_group_list(); - if (tg->shares == shares) - goto done; - - if (shares < MIN_GROUP_SHARES) - shares = MIN_GROUP_SHARES; /* - * Prevent any load balance activity (rebalance_shares, - * load_balance_fair) from referring to this group first, - * by taking it off the rq->leaf_cfs_rq_list on each cpu. + * A weight of 0 or 1 can cause arithmetics problems. + * (The default weight is 1024 - so there's no practical + * limitation from this.) 
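The clamp that follows this comment makes more sense next to the inverse-weight cache visible in the surrounding hunks: each entity stores inv_weight = 2^32 / weight so that later divisions by the weight become multiplications. A weight of 0 would divide by zero, and a weight of 1 yields 2^32, which is presumably the "arithmetics problem" the comment alludes to (it no longer fits an unsigned long on 32-bit builds). A tiny sketch, with struct load_weight mirroring the kernel field names but not its code:

struct load_weight {
        unsigned long weight;
        unsigned long inv_weight;       /* cached 2^32 / weight */
};

static void set_weight(struct load_weight *lw, unsigned long shares)
{
        if (shares < 2)                 /* mirrors the clamp below: avoid /0 and 2^32 */
                shares = 2;

        lw->weight = shares;
        lw->inv_weight = (unsigned long)((1ULL << 32) / shares);
}

With the default 1024 shares the cached inverse is 2^32 / 1024 = 4194304, comfortably in range.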
*/ - for_each_possible_cpu(i) { - cfs_rq = tg->cfs_rq[i]; - list_del_rcu(&cfs_rq->leaf_cfs_rq_list); - } + if (shares < 2) + shares = 2; - /* wait for any ongoing reference to this group to finish */ - synchronize_sched(); + spin_lock(&tg->lock); + if (tg->shares == shares) + goto done; - /* - * Now we are free to modify the group's share on each cpu - * w/o tripping rebalance_share or load_balance_fair. - */ tg->shares = shares; - for_each_possible_cpu(i) { - spin_lock_irq(&cpu_rq(i)->lock); + for_each_possible_cpu(i) set_se_shares(tg->se[i], shares); - spin_unlock_irq(&cpu_rq(i)->lock); - } - /* - * Enable load balance activity on this group, by inserting it back on - * each cpu's rq->leaf_cfs_rq_list. - */ - for_each_possible_cpu(i) { - rq = cpu_rq(i); - cfs_rq = tg->cfs_rq[i]; - list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list); - } done: - unlock_task_group_list(); + spin_unlock(&tg->lock); return 0; } @@ -7780,31 +7179,6 @@ unsigned long sched_group_shares(struct task_group *tg) return tg->shares; } -/* - * Ensure the total rt_ratio <= sysctl_sched_rt_ratio - */ -int sched_group_set_rt_ratio(struct task_group *tg, unsigned long rt_ratio) -{ - struct task_group *tgi; - unsigned long total = 0; - - rcu_read_lock(); - list_for_each_entry_rcu(tgi, &task_groups, list) - total += tgi->rt_ratio; - rcu_read_unlock(); - - if (total + rt_ratio - tg->rt_ratio > sysctl_sched_rt_ratio) - return -EINVAL; - - tg->rt_ratio = rt_ratio; - return 0; -} - -unsigned long sched_group_rt_ratio(struct task_group *tg) -{ - return tg->rt_ratio; -} - #endif /* CONFIG_FAIR_GROUP_SCHED */ #ifdef CONFIG_FAIR_CGROUP_SCHED @@ -7880,30 +7254,12 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft) return (u64) tg->shares; } -static int cpu_rt_ratio_write_uint(struct cgroup *cgrp, struct cftype *cftype, - u64 rt_ratio_val) -{ - return sched_group_set_rt_ratio(cgroup_tg(cgrp), rt_ratio_val); -} - -static u64 cpu_rt_ratio_read_uint(struct cgroup *cgrp, struct cftype *cft) -{ - struct task_group *tg = cgroup_tg(cgrp); - - return (u64) tg->rt_ratio; -} - static struct cftype cpu_files[] = { { .name = "shares", .read_uint = cpu_shares_read_uint, .write_uint = cpu_shares_write_uint, }, - { - .name = "rt_ratio", - .read_uint = cpu_rt_ratio_read_uint, - .write_uint = cpu_rt_ratio_write_uint, - }, }; static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont) diff --git a/trunk/kernel/sched_debug.c b/trunk/kernel/sched_debug.c index 4b5e24cf2f4a..80fbbfc04290 100644 --- a/trunk/kernel/sched_debug.c +++ b/trunk/kernel/sched_debug.c @@ -179,7 +179,6 @@ static void print_cpu(struct seq_file *m, int cpu) PN(prev_clock_raw); P(clock_warps); P(clock_overflows); - P(clock_underflows); P(clock_deep_idle_events); PN(clock_max_delta); P(cpu_load[0]); @@ -300,8 +299,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) PN(se.exec_max); PN(se.slice_max); PN(se.wait_max); - PN(se.wait_sum); - P(se.wait_count); P(sched_info.bkl_count); P(se.nr_migrations); P(se.nr_migrations_cold); @@ -369,8 +366,6 @@ void proc_sched_set_task(struct task_struct *p) { #ifdef CONFIG_SCHEDSTATS p->se.wait_max = 0; - p->se.wait_sum = 0; - p->se.wait_count = 0; p->se.sleep_max = 0; p->se.sum_sleep_runtime = 0; p->se.block_max = 0; diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c index 72e25c7a3a18..da7c061e7206 100644 --- a/trunk/kernel/sched_fair.c +++ b/trunk/kernel/sched_fair.c @@ -20,8 +20,6 @@ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra */ -#include - /* * 
Targeted preemption latency for CPU-bound tasks: * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds) @@ -250,8 +248,8 @@ static u64 __sched_period(unsigned long nr_running) unsigned long nr_latency = sched_nr_latency; if (unlikely(nr_running > nr_latency)) { - period = sysctl_sched_min_granularity; period *= nr_running; + do_div(period, nr_latency); } return period; @@ -385,9 +383,6 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) { schedstat_set(se->wait_max, max(se->wait_max, rq_of(cfs_rq)->clock - se->wait_start)); - schedstat_set(se->wait_count, se->wait_count + 1); - schedstat_set(se->wait_sum, se->wait_sum + - rq_of(cfs_rq)->clock - se->wait_start); schedstat_set(se->wait_start, 0); } @@ -439,7 +434,6 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) #ifdef CONFIG_SCHEDSTATS if (se->sleep_start) { u64 delta = rq_of(cfs_rq)->clock - se->sleep_start; - struct task_struct *tsk = task_of(se); if ((s64)delta < 0) delta = 0; @@ -449,12 +443,9 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) se->sleep_start = 0; se->sum_sleep_runtime += delta; - - account_scheduler_latency(tsk, delta >> 10, 1); } if (se->block_start) { u64 delta = rq_of(cfs_rq)->clock - se->block_start; - struct task_struct *tsk = task_of(se); if ((s64)delta < 0) delta = 0; @@ -471,11 +462,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) * time that the task spent sleeping: */ if (unlikely(prof_on == SLEEP_PROFILING)) { + struct task_struct *tsk = task_of(se); profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk), delta >> 20); } - account_scheduler_latency(tsk, delta >> 10, 0); } #endif } @@ -651,29 +642,13 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) cfs_rq->curr = NULL; } -static void -entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) +static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) { /* * Update run-time statistics of the 'current'. */ update_curr(cfs_rq); -#ifdef CONFIG_SCHED_HRTICK - /* - * queued ticks are scheduled to match the slice, so don't bother - * validating it and just reschedule. - */ - if (queued) - return resched_task(rq_of(cfs_rq)->curr); - /* - * don't let the period tick interfere with the hrtick preemption - */ - if (!sched_feat(DOUBLE_TICK) && - hrtimer_active(&rq_of(cfs_rq)->hrtick_timer)) - return; -#endif - if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT)) check_preempt_tick(cfs_rq, curr); } @@ -715,7 +690,7 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu) /* Iterate thr' all leaf cfs_rq's on a runqueue */ #define for_each_leaf_cfs_rq(rq, cfs_rq) \ - list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list) + list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list) /* Do the two (enqueued) entities belong to the same group ? 
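__sched_period(), restored a few hunks up, stretches the scheduling period once there are more runnable tasks than the latency target can cover, so per-task slices do not shrink without bound. A self-contained sketch of that calculation; the 20 ms base and nr_latency of 5 are assumed defaults for illustration (per the comment above, the real latency target is additionally scaled by CPU count):

#include <stdint.h>

static uint64_t sched_period(unsigned long nr_running)
{
        uint64_t period = 20000000ULL;          /* 20 ms latency target */
        unsigned long nr_latency = 5;           /* tasks the target can cover */

        if (nr_running > nr_latency) {
                period *= nr_running;
                period /= nr_latency;           /* do_div() in the kernel */
        }
        return period;
}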
*/ static inline int @@ -732,8 +707,6 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se) return se->parent; } -#define GROUP_IMBALANCE_PCT 20 - #else /* CONFIG_FAIR_GROUP_SCHED */ #define for_each_sched_entity(se) \ @@ -779,43 +752,6 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se) #endif /* CONFIG_FAIR_GROUP_SCHED */ -#ifdef CONFIG_SCHED_HRTICK -static void hrtick_start_fair(struct rq *rq, struct task_struct *p) -{ - int requeue = rq->curr == p; - struct sched_entity *se = &p->se; - struct cfs_rq *cfs_rq = cfs_rq_of(se); - - WARN_ON(task_rq(p) != rq); - - if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) { - u64 slice = sched_slice(cfs_rq, se); - u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime; - s64 delta = slice - ran; - - if (delta < 0) { - if (rq->curr == p) - resched_task(p); - return; - } - - /* - * Don't schedule slices shorter than 10000ns, that just - * doesn't make sense. Rely on vruntime for fairness. - */ - if (!requeue) - delta = max(10000LL, delta); - - hrtick_start(rq, delta, requeue); - } -} -#else -static inline void -hrtick_start_fair(struct rq *rq, struct task_struct *p) -{ -} -#endif - /* * The enqueue_task method is called before nr_running is * increased. Here we update the fair scheduling stats and @@ -824,28 +760,15 @@ hrtick_start_fair(struct rq *rq, struct task_struct *p) static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup) { struct cfs_rq *cfs_rq; - struct sched_entity *se = &p->se, - *topse = NULL; /* Highest schedulable entity */ - int incload = 1; + struct sched_entity *se = &p->se; for_each_sched_entity(se) { - topse = se; - if (se->on_rq) { - incload = 0; + if (se->on_rq) break; - } cfs_rq = cfs_rq_of(se); enqueue_entity(cfs_rq, se, wakeup); wakeup = 1; } - /* Increment cpu load if we just enqueued the first task of a group on - * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs - * at the highest grouping level. - */ - if (incload) - inc_cpu_load(rq, topse->load.weight); - - hrtick_start_fair(rq, rq->curr); } /* @@ -856,30 +779,16 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup) static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep) { struct cfs_rq *cfs_rq; - struct sched_entity *se = &p->se, - *topse = NULL; /* Highest schedulable entity */ - int decload = 1; + struct sched_entity *se = &p->se; for_each_sched_entity(se) { - topse = se; cfs_rq = cfs_rq_of(se); dequeue_entity(cfs_rq, se, sleep); /* Don't dequeue parent if it has other entities besides us */ - if (cfs_rq->load.weight) { - if (parent_entity(se)) - decload = 0; + if (cfs_rq->load.weight) break; - } sleep = 1; } - /* Decrement cpu load if we just dequeued the last task of a group on - * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs - * at the highest grouping level. - */ - if (decload) - dec_cpu_load(rq, topse->load.weight); - - hrtick_start_fair(rq, rq->curr); } /* @@ -926,154 +835,6 @@ static void yield_task_fair(struct rq *rq) se->vruntime = rightmost->vruntime + 1; } -/* - * wake_idle() will wake a task on an idle cpu if task->cpu is - * not idle and an idle cpu is available. The span of cpus to - * search starts with cpus closest then further out as needed, - * so we always favor a closer, idle cpu. - * - * Returns the CPU we should wake onto. 
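The hrtick_start_fair() code removed above boils down to "arm a one-shot timer for whatever is left of the current slice, or preempt immediately if the slice is already spent". The sketch below keeps only that arithmetic; resched() and arm_timer() are placeholders for resched_task() and hrtick_start(), and the requeue and hrtick_enabled() special cases are dropped.

#include <stdint.h>

static void hrtick_decide(int64_t slice_ns, int64_t ran_ns,
                          void (*resched)(void), void (*arm_timer)(int64_t ns))
{
        int64_t delta = slice_ns - ran_ns;      /* time left in this slice */

        if (delta < 0) {                        /* slice already used up */
                resched();
                return;
        }

        /* slices shorter than 10us are not worth a timer; vruntime handles fairness */
        if (delta < 10000)
                delta = 10000;

        arm_timer(delta);
}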
- */ -#if defined(ARCH_HAS_SCHED_WAKE_IDLE) -static int wake_idle(int cpu, struct task_struct *p) -{ - cpumask_t tmp; - struct sched_domain *sd; - int i; - - /* - * If it is idle, then it is the best cpu to run this task. - * - * This cpu is also the best, if it has more than one task already. - * Siblings must be also busy(in most cases) as they didn't already - * pickup the extra load from this cpu and hence we need not check - * sibling runqueue info. This will avoid the checks and cache miss - * penalities associated with that. - */ - if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1) - return cpu; - - for_each_domain(cpu, sd) { - if (sd->flags & SD_WAKE_IDLE) { - cpus_and(tmp, sd->span, p->cpus_allowed); - for_each_cpu_mask(i, tmp) { - if (idle_cpu(i)) { - if (i != task_cpu(p)) { - schedstat_inc(p, - se.nr_wakeups_idle); - } - return i; - } - } - } else { - break; - } - } - return cpu; -} -#else -static inline int wake_idle(int cpu, struct task_struct *p) -{ - return cpu; -} -#endif - -#ifdef CONFIG_SMP -static int select_task_rq_fair(struct task_struct *p, int sync) -{ - int cpu, this_cpu; - struct rq *rq; - struct sched_domain *sd, *this_sd = NULL; - int new_cpu; - - cpu = task_cpu(p); - rq = task_rq(p); - this_cpu = smp_processor_id(); - new_cpu = cpu; - - if (cpu == this_cpu) - goto out_set_cpu; - - for_each_domain(this_cpu, sd) { - if (cpu_isset(cpu, sd->span)) { - this_sd = sd; - break; - } - } - - if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) - goto out_set_cpu; - - /* - * Check for affine wakeup and passive balancing possibilities. - */ - if (this_sd) { - int idx = this_sd->wake_idx; - unsigned int imbalance; - unsigned long load, this_load; - - imbalance = 100 + (this_sd->imbalance_pct - 100) / 2; - - load = source_load(cpu, idx); - this_load = target_load(this_cpu, idx); - - new_cpu = this_cpu; /* Wake to this CPU if we can */ - - if (this_sd->flags & SD_WAKE_AFFINE) { - unsigned long tl = this_load; - unsigned long tl_per_task; - - /* - * Attract cache-cold tasks on sync wakeups: - */ - if (sync && !task_hot(p, rq->clock, this_sd)) - goto out_set_cpu; - - schedstat_inc(p, se.nr_wakeups_affine_attempts); - tl_per_task = cpu_avg_load_per_task(this_cpu); - - /* - * If sync wakeup then subtract the (maximum possible) - * effect of the currently running task from the load - * of the current CPU: - */ - if (sync) - tl -= current->se.load.weight; - - if ((tl <= load && - tl + target_load(cpu, idx) <= tl_per_task) || - 100*(tl + p->se.load.weight) <= imbalance*load) { - /* - * This domain has SD_WAKE_AFFINE and - * p is cache cold in this domain, and - * there is no bad imbalance. - */ - schedstat_inc(this_sd, ttwu_move_affine); - schedstat_inc(p, se.nr_wakeups_affine); - goto out_set_cpu; - } - } - - /* - * Start passive balancing when half the imbalance_pct - * limit is reached. - */ - if (this_sd->flags & SD_WAKE_BALANCE) { - if (imbalance*this_load <= 100*load) { - schedstat_inc(this_sd, ttwu_move_balance); - schedstat_inc(p, se.nr_wakeups_passive); - goto out_set_cpu; - } - } - } - - new_cpu = cpu; /* Could not wake to this_cpu. 
Wake to cpu instead */ -out_set_cpu: - return wake_idle(new_cpu, p); -} -#endif /* CONFIG_SMP */ - - /* * Preempt the current task with a newly woken task if needed: */ @@ -1115,7 +876,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) static struct task_struct *pick_next_task_fair(struct rq *rq) { - struct task_struct *p; struct cfs_rq *cfs_rq = &rq->cfs; struct sched_entity *se; @@ -1127,10 +887,7 @@ static struct task_struct *pick_next_task_fair(struct rq *rq) cfs_rq = group_cfs_rq(se); } while (cfs_rq); - p = task_of(se); - hrtick_start_fair(rq, p); - - return p; + return task_of(se); } /* @@ -1187,6 +944,25 @@ static struct task_struct *load_balance_next_fair(void *arg) return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr); } +#ifdef CONFIG_FAIR_GROUP_SCHED +static int cfs_rq_best_prio(struct cfs_rq *cfs_rq) +{ + struct sched_entity *curr; + struct task_struct *p; + + if (!cfs_rq->nr_running) + return MAX_PRIO; + + curr = cfs_rq->curr; + if (!curr) + curr = __pick_next_entity(cfs_rq); + + p = task_of(curr); + + return p->prio; +} +#endif + static unsigned long load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, unsigned long max_load_move, @@ -1196,45 +972,28 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, struct cfs_rq *busy_cfs_rq; long rem_load_move = max_load_move; struct rq_iterator cfs_rq_iterator; - unsigned long load_moved; cfs_rq_iterator.start = load_balance_start_fair; cfs_rq_iterator.next = load_balance_next_fair; for_each_leaf_cfs_rq(busiest, busy_cfs_rq) { #ifdef CONFIG_FAIR_GROUP_SCHED - struct cfs_rq *this_cfs_rq = busy_cfs_rq->tg->cfs_rq[this_cpu]; - unsigned long maxload, task_load, group_weight; - unsigned long thisload, per_task_load; - struct sched_entity *se = busy_cfs_rq->tg->se[busiest->cpu]; - - task_load = busy_cfs_rq->load.weight; - group_weight = se->load.weight; + struct cfs_rq *this_cfs_rq; + long imbalance; + unsigned long maxload; - /* - * 'group_weight' is contributed by tasks of total weight - * 'task_load'. To move 'rem_load_move' worth of weight only, - * we need to move a maximum task load of: - * - * maxload = (remload / group_weight) * task_load; - */ - maxload = (rem_load_move * task_load) / group_weight; + this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu); - if (!maxload || !task_load) + imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight; + /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */ + if (imbalance <= 0) continue; - per_task_load = task_load / busy_cfs_rq->nr_running; - /* - * balance_tasks will try to forcibly move atleast one task if - * possible (because of SCHED_LOAD_SCALE_FUZZ). Avoid that if - * maxload is less than GROUP_IMBALANCE_FUZZ% the per_task_load. 
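The removed group-aware path above has to translate "remaining group weight to move" into "task load to pull from this cfs_rq", which is what the maxload computation does. A sketch of just that conversion, using the GROUP_IMBALANCE_PCT value of 20 defined in the code this patch deletes; variable names follow the removed hunk, but this is not the kernel function.

static unsigned long group_maxload(unsigned long rem_load_move,
                                   unsigned long task_load,      /* cfs_rq->load.weight */
                                   unsigned long group_weight,   /* se->load.weight     */
                                   unsigned long nr_running)
{
        unsigned long maxload, per_task_load;

        if (!task_load || !group_weight || !nr_running)
                return 0;

        /* group_weight is "worth" task_load; scale the requested move accordingly */
        maxload = (rem_load_move * task_load) / group_weight;

        /* skip moves under ~20% of one average task's load */
        per_task_load = task_load / nr_running;
        if (100 * maxload < 20 * per_task_load)
                return 0;

        return maxload;
}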
- */ - if (100 * maxload < GROUP_IMBALANCE_PCT * per_task_load) - continue; + /* Don't pull more than imbalance/2 */ + imbalance /= 2; + maxload = min(rem_load_move, imbalance); - /* Disable priority-based load balance */ - *this_best_prio = 0; - thisload = this_cfs_rq->load.weight; + *this_best_prio = cfs_rq_best_prio(this_cfs_rq); #else # define maxload rem_load_move #endif @@ -1243,33 +1002,11 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, * load_balance_[start|next]_fair iterators */ cfs_rq_iterator.arg = busy_cfs_rq; - load_moved = balance_tasks(this_rq, this_cpu, busiest, + rem_load_move -= balance_tasks(this_rq, this_cpu, busiest, maxload, sd, idle, all_pinned, this_best_prio, &cfs_rq_iterator); -#ifdef CONFIG_FAIR_GROUP_SCHED - /* - * load_moved holds the task load that was moved. The - * effective (group) weight moved would be: - * load_moved_eff = load_moved/task_load * group_weight; - */ - load_moved = (group_weight * load_moved) / task_load; - - /* Adjust shares on both cpus to reflect load_moved */ - group_weight -= load_moved; - set_se_shares(se, group_weight); - - se = busy_cfs_rq->tg->se[this_cpu]; - if (!thisload) - group_weight = load_moved; - else - group_weight = se->load.weight + load_moved; - set_se_shares(se, group_weight); -#endif - - rem_load_move -= load_moved; - if (rem_load_move <= 0) break; } @@ -1305,14 +1042,14 @@ move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, /* * scheduler tick hitting a task of our scheduling class: */ -static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) +static void task_tick_fair(struct rq *rq, struct task_struct *curr) { struct cfs_rq *cfs_rq; struct sched_entity *se = &curr->se; for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); - entity_tick(cfs_rq, se, queued); + entity_tick(cfs_rq, se); } } @@ -1350,42 +1087,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p) resched_task(rq->curr); } -/* - * Priority of the task has changed. Check to see if we preempt - * the current task. - */ -static void prio_changed_fair(struct rq *rq, struct task_struct *p, - int oldprio, int running) -{ - /* - * Reschedule if we are currently running on this runqueue and - * our priority decreased, or if we are not currently running on - * this runqueue and our priority is higher than the current's - */ - if (running) { - if (p->prio > oldprio) - resched_task(rq->curr); - } else - check_preempt_curr(rq, p); -} - -/* - * We switched to the sched_fair class. - */ -static void switched_to_fair(struct rq *rq, struct task_struct *p, - int running) -{ - /* - * We were most likely switched from sched_rt, so - * kick off the schedule if running, otherwise just see - * if we can still preempt the current task. - */ - if (running) - resched_task(rq->curr); - else - check_preempt_curr(rq, p); -} - /* Account for a task changing its policy or group. 
* * This routine is mostly called to set cfs_rq->curr field when a task @@ -1407,9 +1108,6 @@ static const struct sched_class fair_sched_class = { .enqueue_task = enqueue_task_fair, .dequeue_task = dequeue_task_fair, .yield_task = yield_task_fair, -#ifdef CONFIG_SMP - .select_task_rq = select_task_rq_fair, -#endif /* CONFIG_SMP */ .check_preempt_curr = check_preempt_wakeup, @@ -1424,9 +1122,6 @@ static const struct sched_class fair_sched_class = { .set_curr_task = set_curr_task_fair, .task_tick = task_tick_fair, .task_new = task_new_fair, - - .prio_changed = prio_changed_fair, - .switched_to = switched_to_fair, }; #ifdef CONFIG_SCHED_DEBUG @@ -1437,9 +1132,7 @@ static void print_cfs_stats(struct seq_file *m, int cpu) #ifdef CONFIG_FAIR_GROUP_SCHED print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs); #endif - rcu_read_lock(); for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq) print_cfs_rq(m, cpu, cfs_rq); - rcu_read_unlock(); } #endif diff --git a/trunk/kernel/sched_idletask.c b/trunk/kernel/sched_idletask.c index 2bcafa375633..bf9c25c15b8b 100644 --- a/trunk/kernel/sched_idletask.c +++ b/trunk/kernel/sched_idletask.c @@ -5,12 +5,6 @@ * handled in sched_fair.c) */ -#ifdef CONFIG_SMP -static int select_task_rq_idle(struct task_struct *p, int sync) -{ - return task_cpu(p); /* IDLE tasks as never migrated */ -} -#endif /* CONFIG_SMP */ /* * Idle tasks are unconditionally rescheduled: */ @@ -61,7 +55,7 @@ move_one_task_idle(struct rq *this_rq, int this_cpu, struct rq *busiest, } #endif -static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued) +static void task_tick_idle(struct rq *rq, struct task_struct *curr) { } @@ -69,33 +63,6 @@ static void set_curr_task_idle(struct rq *rq) { } -static void switched_to_idle(struct rq *rq, struct task_struct *p, - int running) -{ - /* Can this actually happen?? */ - if (running) - resched_task(rq->curr); - else - check_preempt_curr(rq, p); -} - -static void prio_changed_idle(struct rq *rq, struct task_struct *p, - int oldprio, int running) -{ - /* This can happen for hot plug CPUS */ - - /* - * Reschedule if we are currently running on this runqueue and - * our priority decreased, or if we are not currently running on - * this runqueue and our priority is higher than the current's - */ - if (running) { - if (p->prio > oldprio) - resched_task(rq->curr); - } else - check_preempt_curr(rq, p); -} - /* * Simple, special scheduling class for the per-CPU idle tasks: */ @@ -105,9 +72,6 @@ const struct sched_class idle_sched_class = { /* dequeue is not valid, we print a debug message there: */ .dequeue_task = dequeue_task_idle, -#ifdef CONFIG_SMP - .select_task_rq = select_task_rq_idle, -#endif /* CONFIG_SMP */ .check_preempt_curr = check_preempt_curr_idle, @@ -121,9 +85,5 @@ const struct sched_class idle_sched_class = { .set_curr_task = set_curr_task_idle, .task_tick = task_tick_idle, - - .prio_changed = prio_changed_idle, - .switched_to = switched_to_idle, - /* no .task_new for idle tasks */ }; diff --git a/trunk/kernel/sched_rt.c b/trunk/kernel/sched_rt.c index 274b40d7bef2..9ba3daa03475 100644 --- a/trunk/kernel/sched_rt.c +++ b/trunk/kernel/sched_rt.c @@ -3,217 +3,6 @@ * policies) */ -#ifdef CONFIG_SMP - -static inline int rt_overloaded(struct rq *rq) -{ - return atomic_read(&rq->rd->rto_count); -} - -static inline void rt_set_overload(struct rq *rq) -{ - cpu_set(rq->cpu, rq->rd->rto_mask); - /* - * Make sure the mask is visible before we set - * the overload count. That is checked to determine - * if we should look at the mask. 
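A little further down, the removed rt_set_overload()/rt_overloaded() pair depends on exactly the ordering described in its comment: the CPU bit must be published before the overload count, so any reader that observes a non-zero count also sees the bit. A user-space analogue using C11 atomics follows; the kernel uses an explicit wmb() where this sketch leans on release/acquire on the counter, and the identifiers are illustrative.

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t rto_mask;
static atomic_int       rto_count;

static void set_overload(int cpu)
{
        atomic_fetch_or_explicit(&rto_mask, 1ULL << cpu, memory_order_relaxed);
        /* publish the mask update before bumping the count */
        atomic_fetch_add_explicit(&rto_count, 1, memory_order_release);
}

static int overloaded_cpu_mask(uint64_t *mask)
{
        if (atomic_load_explicit(&rto_count, memory_order_acquire) == 0)
                return 0;
        /* count was seen non-zero, so the corresponding bit is visible too */
        *mask = atomic_load_explicit(&rto_mask, memory_order_relaxed);
        return 1;
}

static void clear_overload(int cpu)
{
        /* as the removed comment notes, ordering does not matter on this side */
        atomic_fetch_sub_explicit(&rto_count, 1, memory_order_relaxed);
        atomic_fetch_and_explicit(&rto_mask, ~(1ULL << cpu), memory_order_relaxed);
}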
It would be a shame - * if we looked at the mask, but the mask was not - * updated yet. - */ - wmb(); - atomic_inc(&rq->rd->rto_count); -} - -static inline void rt_clear_overload(struct rq *rq) -{ - /* the order here really doesn't matter */ - atomic_dec(&rq->rd->rto_count); - cpu_clear(rq->cpu, rq->rd->rto_mask); -} - -static void update_rt_migration(struct rq *rq) -{ - if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) { - if (!rq->rt.overloaded) { - rt_set_overload(rq); - rq->rt.overloaded = 1; - } - } else if (rq->rt.overloaded) { - rt_clear_overload(rq); - rq->rt.overloaded = 0; - } -} -#endif /* CONFIG_SMP */ - -static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) -{ - return container_of(rt_se, struct task_struct, rt); -} - -static inline int on_rt_rq(struct sched_rt_entity *rt_se) -{ - return !list_empty(&rt_se->run_list); -} - -#ifdef CONFIG_FAIR_GROUP_SCHED - -static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq) -{ - if (!rt_rq->tg) - return SCHED_RT_FRAC; - - return rt_rq->tg->rt_ratio; -} - -#define for_each_leaf_rt_rq(rt_rq, rq) \ - list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list) - -static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) -{ - return rt_rq->rq; -} - -static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) -{ - return rt_se->rt_rq; -} - -#define for_each_sched_rt_entity(rt_se) \ - for (; rt_se; rt_se = rt_se->parent) - -static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) -{ - return rt_se->my_q; -} - -static void enqueue_rt_entity(struct sched_rt_entity *rt_se); -static void dequeue_rt_entity(struct sched_rt_entity *rt_se); - -static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq) -{ - struct sched_rt_entity *rt_se = rt_rq->rt_se; - - if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) { - struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; - - enqueue_rt_entity(rt_se); - if (rt_rq->highest_prio < curr->prio) - resched_task(curr); - } -} - -static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq) -{ - struct sched_rt_entity *rt_se = rt_rq->rt_se; - - if (rt_se && on_rt_rq(rt_se)) - dequeue_rt_entity(rt_se); -} - -#else - -static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq) -{ - return sysctl_sched_rt_ratio; -} - -#define for_each_leaf_rt_rq(rt_rq, rq) \ - for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL) - -static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) -{ - return container_of(rt_rq, struct rq, rt); -} - -static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) -{ - struct task_struct *p = rt_task_of(rt_se); - struct rq *rq = task_rq(p); - - return &rq->rt; -} - -#define for_each_sched_rt_entity(rt_se) \ - for (; rt_se; rt_se = NULL) - -static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) -{ - return NULL; -} - -static inline void sched_rt_ratio_enqueue(struct rt_rq *rt_rq) -{ -} - -static inline void sched_rt_ratio_dequeue(struct rt_rq *rt_rq) -{ -} - -#endif - -static inline int rt_se_prio(struct sched_rt_entity *rt_se) -{ -#ifdef CONFIG_FAIR_GROUP_SCHED - struct rt_rq *rt_rq = group_rt_rq(rt_se); - - if (rt_rq) - return rt_rq->highest_prio; -#endif - - return rt_task_of(rt_se)->prio; -} - -static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq) -{ - unsigned int rt_ratio = sched_rt_ratio(rt_rq); - u64 period, ratio; - - if (rt_ratio == SCHED_RT_FRAC) - return 0; - - if (rt_rq->rt_throttled) - return 1; - - period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC; - ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT; - - 
if (rt_rq->rt_time > ratio) { - struct rq *rq = rq_of_rt_rq(rt_rq); - - rq->rt_throttled = 1; - rt_rq->rt_throttled = 1; - - sched_rt_ratio_dequeue(rt_rq); - return 1; - } - - return 0; -} - -static void update_sched_rt_period(struct rq *rq) -{ - struct rt_rq *rt_rq; - u64 period; - - while (rq->clock > rq->rt_period_expire) { - period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC; - rq->rt_period_expire += period; - - for_each_leaf_rt_rq(rt_rq, rq) { - unsigned long rt_ratio = sched_rt_ratio(rt_rq); - u64 ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT; - - rt_rq->rt_time -= min(rt_rq->rt_time, ratio); - if (rt_rq->rt_throttled) { - rt_rq->rt_throttled = 0; - sched_rt_ratio_enqueue(rt_rq); - } - } - - rq->rt_throttled = 0; - } -} - /* * Update the current task's runtime statistics. Skip current tasks that * are not in our scheduling class. @@ -221,8 +10,6 @@ static void update_sched_rt_period(struct rq *rq) static void update_curr_rt(struct rq *rq) { struct task_struct *curr = rq->curr; - struct sched_rt_entity *rt_se = &curr->rt; - struct rt_rq *rt_rq = rt_rq_of_se(rt_se); u64 delta_exec; if (!task_has_rt_policy(curr)) @@ -237,228 +24,47 @@ static void update_curr_rt(struct rq *rq) curr->se.sum_exec_runtime += delta_exec; curr->se.exec_start = rq->clock; cpuacct_charge(curr, delta_exec); - - rt_rq->rt_time += delta_exec; - /* - * might make it a tad more accurate: - * - * update_sched_rt_period(rq); - */ - if (sched_rt_ratio_exceeded(rt_rq)) - resched_task(curr); -} - -static inline -void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) -{ - WARN_ON(!rt_prio(rt_se_prio(rt_se))); - rt_rq->rt_nr_running++; -#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED - if (rt_se_prio(rt_se) < rt_rq->highest_prio) - rt_rq->highest_prio = rt_se_prio(rt_se); -#endif -#ifdef CONFIG_SMP - if (rt_se->nr_cpus_allowed > 1) { - struct rq *rq = rq_of_rt_rq(rt_rq); - rq->rt.rt_nr_migratory++; - } - - update_rt_migration(rq_of_rt_rq(rt_rq)); -#endif } -static inline -void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) -{ - WARN_ON(!rt_prio(rt_se_prio(rt_se))); - WARN_ON(!rt_rq->rt_nr_running); - rt_rq->rt_nr_running--; -#if defined CONFIG_SMP || defined CONFIG_FAIR_GROUP_SCHED - if (rt_rq->rt_nr_running) { - struct rt_prio_array *array; - - WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio); - if (rt_se_prio(rt_se) == rt_rq->highest_prio) { - /* recalculate */ - array = &rt_rq->active; - rt_rq->highest_prio = - sched_find_first_bit(array->bitmap); - } /* otherwise leave rq->highest prio alone */ - } else - rt_rq->highest_prio = MAX_RT_PRIO; -#endif -#ifdef CONFIG_SMP - if (rt_se->nr_cpus_allowed > 1) { - struct rq *rq = rq_of_rt_rq(rt_rq); - rq->rt.rt_nr_migratory--; - } - - update_rt_migration(rq_of_rt_rq(rt_rq)); -#endif /* CONFIG_SMP */ -} - -static void enqueue_rt_entity(struct sched_rt_entity *rt_se) -{ - struct rt_rq *rt_rq = rt_rq_of_se(rt_se); - struct rt_prio_array *array = &rt_rq->active; - struct rt_rq *group_rq = group_rt_rq(rt_se); - - if (group_rq && group_rq->rt_throttled) - return; - - list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se)); - __set_bit(rt_se_prio(rt_se), array->bitmap); - - inc_rt_tasks(rt_se, rt_rq); -} - -static void dequeue_rt_entity(struct sched_rt_entity *rt_se) -{ - struct rt_rq *rt_rq = rt_rq_of_se(rt_se); - struct rt_prio_array *array = &rt_rq->active; - - list_del_init(&rt_se->run_list); - if (list_empty(array->queue + rt_se_prio(rt_se))) - __clear_bit(rt_se_prio(rt_se), array->bitmap); - - dec_rt_tasks(rt_se, rt_rq); 
-} - -/* - * Because the prio of an upper entry depends on the lower - * entries, we must remove entries top - down. - * - * XXX: O(1/2 h^2) because we can only walk up, not down the chain. - * doesn't matter much for now, as h=2 for GROUP_SCHED. - */ -static void dequeue_rt_stack(struct task_struct *p) +static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup) { - struct sched_rt_entity *rt_se, *top_se; + struct rt_prio_array *array = &rq->rt.active; - /* - * dequeue all, top - down. - */ - do { - rt_se = &p->rt; - top_se = NULL; - for_each_sched_rt_entity(rt_se) { - if (on_rt_rq(rt_se)) - top_se = rt_se; - } - if (top_se) - dequeue_rt_entity(top_se); - } while (top_se); + list_add_tail(&p->run_list, array->queue + p->prio); + __set_bit(p->prio, array->bitmap); } /* * Adding/removing a task to/from a priority array: */ -static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup) -{ - struct sched_rt_entity *rt_se = &p->rt; - - if (wakeup) - rt_se->timeout = 0; - - dequeue_rt_stack(p); - - /* - * enqueue everybody, bottom - up. - */ - for_each_sched_rt_entity(rt_se) - enqueue_rt_entity(rt_se); - - inc_cpu_load(rq, p->se.load.weight); -} - static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep) { - struct sched_rt_entity *rt_se = &p->rt; - struct rt_rq *rt_rq; + struct rt_prio_array *array = &rq->rt.active; update_curr_rt(rq); - dequeue_rt_stack(p); - - /* - * re-enqueue all non-empty rt_rq entities. - */ - for_each_sched_rt_entity(rt_se) { - rt_rq = group_rt_rq(rt_se); - if (rt_rq && rt_rq->rt_nr_running) - enqueue_rt_entity(rt_se); - } - - dec_cpu_load(rq, p->se.load.weight); + list_del(&p->run_list); + if (list_empty(array->queue + p->prio)) + __clear_bit(p->prio, array->bitmap); } /* * Put task to the end of the run list without the overhead of dequeue * followed by enqueue. */ -static -void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se) -{ - struct rt_prio_array *array = &rt_rq->active; - - list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se)); -} - static void requeue_task_rt(struct rq *rq, struct task_struct *p) { - struct sched_rt_entity *rt_se = &p->rt; - struct rt_rq *rt_rq; + struct rt_prio_array *array = &rq->rt.active; - for_each_sched_rt_entity(rt_se) { - rt_rq = rt_rq_of_se(rt_se); - requeue_rt_entity(rt_rq, rt_se); - } + list_move_tail(&p->run_list, array->queue + p->prio); } -static void yield_task_rt(struct rq *rq) +static void +yield_task_rt(struct rq *rq) { requeue_task_rt(rq, rq->curr); } -#ifdef CONFIG_SMP -static int find_lowest_rq(struct task_struct *task); - -static int select_task_rq_rt(struct task_struct *p, int sync) -{ - struct rq *rq = task_rq(p); - - /* - * If the current task is an RT task, then - * try to see if we can wake this RT task up on another - * runqueue. Otherwise simply start this RT task - * on its current runqueue. - * - * We want to avoid overloading runqueues. Even if - * the RT task is of higher priority than the current RT task. - * RT tasks behave differently than other tasks. If - * one gets preempted, we try to push it off to another queue. - * So trying to keep a preempting RT task on the same - * cache hot CPU will force the running RT task to - * a cold CPU. So we waste all the cache for the lower - * RT task in hopes of saving some of a RT task - * that is just being woken and probably will have - * cold cache anyway. 
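The enqueue/dequeue paths this hunk falls back to are the classic O(1) priority array: one FIFO per priority, a bitmap of non-empty priorities, and a delimiter bit at MAX_RT_PRIO so the bit search always terminates. A reduced model follows; the per-priority FIFOs are abstracted to counters and __builtin_ctzll() stands in for sched_find_first_bit(), so this is an illustration rather than the kernel data structure.

#include <stdint.h>
#include <string.h>

#define MAX_RT_PRIO 100

struct prio_array {
        uint64_t bitmap[2];             /* covers MAX_RT_PRIO + 1 bits */
        unsigned int nr[MAX_RT_PRIO];   /* tasks queued at each priority */
};

static void array_init(struct prio_array *a)
{
        memset(a, 0, sizeof(*a));
        /* delimiter for the bit search, as in the init code above */
        a->bitmap[MAX_RT_PRIO / 64] |= 1ULL << (MAX_RT_PRIO % 64);
}

static void array_enqueue(struct prio_array *a, int prio)
{
        a->nr[prio]++;
        a->bitmap[prio / 64] |= 1ULL << (prio % 64);
}

static void array_dequeue(struct prio_array *a, int prio)
{
        if (--a->nr[prio] == 0)
                a->bitmap[prio / 64] &= ~(1ULL << (prio % 64));
}

/* lowest set bit == highest runnable priority; MAX_RT_PRIO means empty */
static int array_first(const struct prio_array *a)
{
        for (int w = 0; w < 2; w++)
                if (a->bitmap[w])
                        return w * 64 + __builtin_ctzll(a->bitmap[w]);
        return MAX_RT_PRIO;
}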
- */ - if (unlikely(rt_task(rq->curr)) && - (p->rt.nr_cpus_allowed > 1)) { - int cpu = find_lowest_rq(p); - - return (cpu == -1) ? task_cpu(p) : cpu; - } - - /* - * Otherwise, just let it ride on the affined RQ and the - * post-schedule router will push the preempted task away - */ - return task_cpu(p); -} -#endif /* CONFIG_SMP */ - /* * Preempt the current task with a newly woken task if needed: */ @@ -468,46 +74,23 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p) resched_task(rq->curr); } -static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, - struct rt_rq *rt_rq) +static struct task_struct *pick_next_task_rt(struct rq *rq) { - struct rt_prio_array *array = &rt_rq->active; - struct sched_rt_entity *next = NULL; + struct rt_prio_array *array = &rq->rt.active; + struct task_struct *next; struct list_head *queue; int idx; idx = sched_find_first_bit(array->bitmap); - BUG_ON(idx >= MAX_RT_PRIO); - - queue = array->queue + idx; - next = list_entry(queue->next, struct sched_rt_entity, run_list); - - return next; -} - -static struct task_struct *pick_next_task_rt(struct rq *rq) -{ - struct sched_rt_entity *rt_se; - struct task_struct *p; - struct rt_rq *rt_rq; - - rt_rq = &rq->rt; - - if (unlikely(!rt_rq->rt_nr_running)) + if (idx >= MAX_RT_PRIO) return NULL; - if (sched_rt_ratio_exceeded(rt_rq)) - return NULL; + queue = array->queue + idx; + next = list_entry(queue->next, struct task_struct, run_list); - do { - rt_se = pick_next_rt_entity(rq, rt_rq); - BUG_ON(!rt_se); - rt_rq = group_rt_rq(rt_se); - } while (rt_rq); + next->se.exec_start = rq->clock; - p = rt_task_of(rt_se); - p->se.exec_start = rq->clock; - return p; + return next; } static void put_prev_task_rt(struct rq *rq, struct task_struct *p) @@ -517,448 +100,76 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) } #ifdef CONFIG_SMP - -/* Only try algorithms three times */ -#define RT_MAX_TRIES 3 - -static int double_lock_balance(struct rq *this_rq, struct rq *busiest); -static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep); - -static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) -{ - if (!task_running(rq, p) && - (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) && - (p->rt.nr_cpus_allowed > 1)) - return 1; - return 0; -} - -/* Return the second highest RT task, NULL otherwise */ -static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu) +/* + * Load-balancing iterator. Note: while the runqueue stays locked + * during the whole iteration, the current task might be + * dequeued so the iterator has to be dequeue-safe. 
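The comment begun just above (and continued in the next hunk) is about making the load-balance iterator safe against the element it returns being pulled off the queue. The idea, isolated from the runqueue details, is simply to advance the cursor before handing the current element out; struct node and the callback type below are illustrative only.

struct node {
        struct node *next;
        int payload;
};

typedef int (*visit_fn)(struct node *n, void *arg);    /* may unlink or free n */

static void walk_dequeue_safe(struct node *head, visit_fn visit, void *arg)
{
        struct node *cur = head;

        while (cur) {
                struct node *next = cur->next;  /* pre-iterate: remember the successor */

                if (visit(cur, arg))            /* visit() may remove cur from the list */
                        break;
                cur = next;                     /* still valid even if cur is gone */
        }
}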
Here we + * achieve that by always pre-iterating before returning + * the current task: + */ +static struct task_struct *load_balance_start_rt(void *arg) { - struct task_struct *next = NULL; - struct sched_rt_entity *rt_se; - struct rt_prio_array *array; - struct rt_rq *rt_rq; + struct rq *rq = arg; + struct rt_prio_array *array = &rq->rt.active; + struct list_head *head, *curr; + struct task_struct *p; int idx; - for_each_leaf_rt_rq(rt_rq, rq) { - array = &rt_rq->active; - idx = sched_find_first_bit(array->bitmap); - next_idx: - if (idx >= MAX_RT_PRIO) - continue; - if (next && next->prio < idx) - continue; - list_for_each_entry(rt_se, array->queue + idx, run_list) { - struct task_struct *p = rt_task_of(rt_se); - if (pick_rt_task(rq, p, cpu)) { - next = p; - break; - } - } - if (!next) { - idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1); - goto next_idx; - } - } - - return next; -} - -static DEFINE_PER_CPU(cpumask_t, local_cpu_mask); - -static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask) -{ - int lowest_prio = -1; - int lowest_cpu = -1; - int count = 0; - int cpu; - - cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed); - - /* - * Scan each rq for the lowest prio. - */ - for_each_cpu_mask(cpu, *lowest_mask) { - struct rq *rq = cpu_rq(cpu); - - /* We look for lowest RT prio or non-rt CPU */ - if (rq->rt.highest_prio >= MAX_RT_PRIO) { - /* - * if we already found a low RT queue - * and now we found this non-rt queue - * clear the mask and set our bit. - * Otherwise just return the queue as is - * and the count==1 will cause the algorithm - * to use the first bit found. - */ - if (lowest_cpu != -1) { - cpus_clear(*lowest_mask); - cpu_set(rq->cpu, *lowest_mask); - } - return 1; - } - - /* no locking for now */ - if ((rq->rt.highest_prio > task->prio) - && (rq->rt.highest_prio >= lowest_prio)) { - if (rq->rt.highest_prio > lowest_prio) { - /* new low - clear old data */ - lowest_prio = rq->rt.highest_prio; - lowest_cpu = cpu; - count = 0; - } - count++; - } else - cpu_clear(cpu, *lowest_mask); - } - - /* - * Clear out all the set bits that represent - * runqueues that were of higher prio than - * the lowest_prio. - */ - if (lowest_cpu > 0) { - /* - * Perhaps we could add another cpumask op to - * zero out bits. Like cpu_zero_bits(cpumask, nrbits); - * Then that could be optimized to use memset and such. - */ - for_each_cpu_mask(cpu, *lowest_mask) { - if (cpu >= lowest_cpu) - break; - cpu_clear(cpu, *lowest_mask); - } - } - - return count; -} - -static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask) -{ - int first; - - /* "this_cpu" is cheaper to preempt than a remote processor */ - if ((this_cpu != -1) && cpu_isset(this_cpu, *mask)) - return this_cpu; - - first = first_cpu(*mask); - if (first != NR_CPUS) - return first; - - return -1; -} - -static int find_lowest_rq(struct task_struct *task) -{ - struct sched_domain *sd; - cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask); - int this_cpu = smp_processor_id(); - int cpu = task_cpu(task); - int count = find_lowest_cpus(task, lowest_mask); - - if (!count) - return -1; /* No targets found */ - - /* - * There is no sense in performing an optimal search if only one - * target is found. - */ - if (count == 1) - return first_cpu(*lowest_mask); - - /* - * At this point we have built a mask of cpus representing the - * lowest priority tasks in the system. Now we want to elect - * the best one based on our affinity and topology. 
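The election the comment above describes follows a simple preference order in the removed find_lowest_rq()/pick_optimal_cpu() code: the CPU the task last ran on, then the CPU doing the push, then anything left in the mask. The sketch below keeps that cascade and drops the SD_WAKE_AFFINE domain walk; pick_push_target() is an illustrative name and a 64-bit word stands in for cpumask_t.

#include <stdint.h>

static int pick_push_target(uint64_t lowest_mask, int task_cpu, int this_cpu)
{
        if (lowest_mask & (1ULL << task_cpu))
                return task_cpu;                /* likely still cache-hot there */

        if (this_cpu >= 0 && (lowest_mask & (1ULL << this_cpu)))
                return this_cpu;                /* cheaper to preempt than a remote CPU */

        if (lowest_mask)
                return __builtin_ctzll(lowest_mask);    /* first_cpu() equivalent */

        return -1;                              /* no suitable target */
}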
- * - * We prioritize the last cpu that the task executed on since - * it is most likely cache-hot in that location. - */ - if (cpu_isset(cpu, *lowest_mask)) - return cpu; - - /* - * Otherwise, we consult the sched_domains span maps to figure - * out which cpu is logically closest to our hot cache data. - */ - if (this_cpu == cpu) - this_cpu = -1; /* Skip this_cpu opt if the same */ - - for_each_domain(cpu, sd) { - if (sd->flags & SD_WAKE_AFFINE) { - cpumask_t domain_mask; - int best_cpu; - - cpus_and(domain_mask, sd->span, *lowest_mask); - - best_cpu = pick_optimal_cpu(this_cpu, - &domain_mask); - if (best_cpu != -1) - return best_cpu; - } - } - - /* - * And finally, if there were no matches within the domains - * just give the caller *something* to work with from the compatible - * locations. - */ - return pick_optimal_cpu(this_cpu, lowest_mask); -} - -/* Will lock the rq it finds */ -static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) -{ - struct rq *lowest_rq = NULL; - int tries; - int cpu; - - for (tries = 0; tries < RT_MAX_TRIES; tries++) { - cpu = find_lowest_rq(task); - - if ((cpu == -1) || (cpu == rq->cpu)) - break; + idx = sched_find_first_bit(array->bitmap); + if (idx >= MAX_RT_PRIO) + return NULL; - lowest_rq = cpu_rq(cpu); + head = array->queue + idx; + curr = head->prev; - /* if the prio of this runqueue changed, try again */ - if (double_lock_balance(rq, lowest_rq)) { - /* - * We had to unlock the run queue. In - * the mean time, task could have - * migrated already or had its affinity changed. - * Also make sure that it wasn't scheduled on its rq. - */ - if (unlikely(task_rq(task) != rq || - !cpu_isset(lowest_rq->cpu, - task->cpus_allowed) || - task_running(rq, task) || - !task->se.on_rq)) { + p = list_entry(curr, struct task_struct, run_list); - spin_unlock(&lowest_rq->lock); - lowest_rq = NULL; - break; - } - } + curr = curr->prev; - /* If this rq is still suitable use it. */ - if (lowest_rq->rt.highest_prio > task->prio) - break; + rq->rt.rt_load_balance_idx = idx; + rq->rt.rt_load_balance_head = head; + rq->rt.rt_load_balance_curr = curr; - /* try again */ - spin_unlock(&lowest_rq->lock); - lowest_rq = NULL; - } - - return lowest_rq; + return p; } -/* - * If the current CPU has more than one RT task, see if the non - * running task can migrate over to a CPU that is running a task - * of lesser priority. - */ -static int push_rt_task(struct rq *rq) +static struct task_struct *load_balance_next_rt(void *arg) { - struct task_struct *next_task; - struct rq *lowest_rq; - int ret = 0; - int paranoid = RT_MAX_TRIES; - - if (!rq->rt.overloaded) - return 0; + struct rq *rq = arg; + struct rt_prio_array *array = &rq->rt.active; + struct list_head *head, *curr; + struct task_struct *p; + int idx; - next_task = pick_next_highest_task_rt(rq, -1); - if (!next_task) - return 0; - - retry: - if (unlikely(next_task == rq->curr)) { - WARN_ON(1); - return 0; - } + idx = rq->rt.rt_load_balance_idx; + head = rq->rt.rt_load_balance_head; + curr = rq->rt.rt_load_balance_curr; /* - * It's possible that the next_task slipped in of - * higher priority than current. If that's the case - * just reschedule current. 
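find_lock_lowest_rq() in the removed code illustrates a pattern worth calling out: taking the target runqueue's lock may force the source lock to be dropped, and once it has been dropped every cached assumption about the task must be rechecked before pushing it. A pthread-based sketch of that shape follows; struct rq, struct task and still_pushable() are illustrative, not the kernel API.

#include <pthread.h>
#include <stdbool.h>

struct rq   { pthread_mutex_t lock; };
struct task { struct rq *rq; };

/* lock both rqs in a fixed (address) order; returns true if 'a' was dropped */
static bool double_lock(struct rq *a, struct rq *b)
{
        if (pthread_mutex_trylock(&b->lock) == 0)
                return false;                   /* got it without dropping 'a' */

        pthread_mutex_unlock(&a->lock);
        if (a < b) {
                pthread_mutex_lock(&a->lock);
                pthread_mutex_lock(&b->lock);
        } else {
                pthread_mutex_lock(&b->lock);
                pthread_mutex_lock(&a->lock);
        }
        return true;                            /* 'a' was released in between */
}

static bool lock_target_rq(struct rq *src, struct rq *target, struct task *t,
                           bool (*still_pushable)(struct task *, struct rq *))
{
        if (double_lock(src, target) &&
            (t->rq != src || !still_pushable(t, src))) {
                /* the world changed while src was unlocked: give up this target */
                pthread_mutex_unlock(&target->lock);
                return false;
        }
        return true;                            /* both locks held, state revalidated */
}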
+ * If we arrived back to the head again then + * iterate to the next queue (if any): */ - if (unlikely(next_task->prio < rq->curr->prio)) { - resched_task(rq->curr); - return 0; - } - - /* We might release rq lock */ - get_task_struct(next_task); - - /* find_lock_lowest_rq locks the rq if found */ - lowest_rq = find_lock_lowest_rq(next_task, rq); - if (!lowest_rq) { - struct task_struct *task; - /* - * find lock_lowest_rq releases rq->lock - * so it is possible that next_task has changed. - * If it has, then try again. - */ - task = pick_next_highest_task_rt(rq, -1); - if (unlikely(task != next_task) && task && paranoid--) { - put_task_struct(next_task); - next_task = task; - goto retry; - } - goto out; - } - - deactivate_task(rq, next_task, 0); - set_task_cpu(next_task, lowest_rq->cpu); - activate_task(lowest_rq, next_task, 0); + if (unlikely(head == curr)) { + int next_idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1); - resched_task(lowest_rq->curr); + if (next_idx >= MAX_RT_PRIO) + return NULL; - spin_unlock(&lowest_rq->lock); + idx = next_idx; + head = array->queue + idx; + curr = head->prev; - ret = 1; -out: - put_task_struct(next_task); - - return ret; -} - -/* - * TODO: Currently we just use the second highest prio task on - * the queue, and stop when it can't migrate (or there's - * no more RT tasks). There may be a case where a lower - * priority RT task has a different affinity than the - * higher RT task. In this case the lower RT task could - * possibly be able to migrate where as the higher priority - * RT task could not. We currently ignore this issue. - * Enhancements are welcome! - */ -static void push_rt_tasks(struct rq *rq) -{ - /* push_rt_task will return true if it moved an RT */ - while (push_rt_task(rq)) - ; -} - -static int pull_rt_task(struct rq *this_rq) -{ - int this_cpu = this_rq->cpu, ret = 0, cpu; - struct task_struct *p, *next; - struct rq *src_rq; - - if (likely(!rt_overloaded(this_rq))) - return 0; - - next = pick_next_task_rt(this_rq); - - for_each_cpu_mask(cpu, this_rq->rd->rto_mask) { - if (this_cpu == cpu) - continue; - - src_rq = cpu_rq(cpu); - /* - * We can potentially drop this_rq's lock in - * double_lock_balance, and another CPU could - * steal our next task - hence we must cause - * the caller to recalculate the next task - * in that case: - */ - if (double_lock_balance(this_rq, src_rq)) { - struct task_struct *old_next = next; - - next = pick_next_task_rt(this_rq); - if (next != old_next) - ret = 1; - } - - /* - * Are there still pullable RT tasks? - */ - if (src_rq->rt.rt_nr_running <= 1) - goto skip; - - p = pick_next_highest_task_rt(src_rq, this_cpu); - - /* - * Do we have an RT task that preempts - * the to-be-scheduled task? - */ - if (p && (!next || (p->prio < next->prio))) { - WARN_ON(p == src_rq->curr); - WARN_ON(!p->se.on_rq); - - /* - * There's a chance that p is higher in priority - * than what's currently running on its cpu. - * This is just that p is wakeing up and hasn't - * had a chance to schedule. We only pull - * p if it is lower in priority than the - * current task on the run queue or - * this_rq next task is lower in prio than - * the current task on that rq. - */ - if (p->prio < src_rq->curr->prio || - (next && next->prio < src_rq->curr->prio)) - goto skip; - - ret = 1; - - deactivate_task(src_rq, p, 0); - set_task_cpu(p, this_cpu); - activate_task(this_rq, p, 0); - /* - * We continue with the search, just in - * case there's an even higher prio task - * in another runqueue. 
(low likelyhood - * but possible) - * - * Update next so that we won't pick a task - * on another cpu with a priority lower (or equal) - * than the one we just picked. - */ - next = p; - - } - skip: - spin_unlock(&src_rq->lock); + rq->rt.rt_load_balance_idx = idx; + rq->rt.rt_load_balance_head = head; } - return ret; -} - -static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) -{ - /* Try to pull RT tasks here if we lower this rq's prio */ - if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio) - pull_rt_task(rq); -} + p = list_entry(curr, struct task_struct, run_list); -static void post_schedule_rt(struct rq *rq) -{ - /* - * If we have more than one rt_task queued, then - * see if we can push the other rt_tasks off to other CPUS. - * Note we may release the rq lock, and since - * the lock was owned by prev, we need to release it - * first via finish_lock_switch and then reaquire it here. - */ - if (unlikely(rq->rt.overloaded)) { - spin_lock_irq(&rq->lock); - push_rt_tasks(rq); - spin_unlock_irq(&rq->lock); - } -} + curr = curr->prev; + rq->rt.rt_load_balance_curr = curr; -static void task_wake_up_rt(struct rq *rq, struct task_struct *p) -{ - if (!task_running(rq, p) && - (p->prio >= rq->rt.highest_prio) && - rq->rt.overloaded) - push_rt_tasks(rq); + return p; } static unsigned long @@ -967,170 +178,38 @@ load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, struct sched_domain *sd, enum cpu_idle_type idle, int *all_pinned, int *this_best_prio) { - /* don't touch RT tasks */ - return 0; -} - -static int -move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, - struct sched_domain *sd, enum cpu_idle_type idle) -{ - /* don't touch RT tasks */ - return 0; -} + struct rq_iterator rt_rq_iterator; -static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask) -{ - int weight = cpus_weight(*new_mask); - - BUG_ON(!rt_task(p)); - - /* - * Update the migration status of the RQ if we have an RT task - * which is running AND changing its weight value. + rt_rq_iterator.start = load_balance_start_rt; + rt_rq_iterator.next = load_balance_next_rt; + /* pass 'busiest' rq argument into + * load_balance_[start|next]_rt iterators */ - if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) { - struct rq *rq = task_rq(p); - - if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) { - rq->rt.rt_nr_migratory++; - } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) { - BUG_ON(!rq->rt.rt_nr_migratory); - rq->rt.rt_nr_migratory--; - } + rt_rq_iterator.arg = busiest; - update_rt_migration(rq); - } - - p->cpus_allowed = *new_mask; - p->rt.nr_cpus_allowed = weight; -} - -/* Assumes rq->lock is held */ -static void join_domain_rt(struct rq *rq) -{ - if (rq->rt.overloaded) - rt_set_overload(rq); -} - -/* Assumes rq->lock is held */ -static void leave_domain_rt(struct rq *rq) -{ - if (rq->rt.overloaded) - rt_clear_overload(rq); -} - -/* - * When switch from the rt queue, we bring ourselves to a position - * that we might want to pull RT tasks from other runqueues. - */ -static void switched_from_rt(struct rq *rq, struct task_struct *p, - int running) -{ - /* - * If there are other RT tasks then we will reschedule - * and the scheduling of the other RT tasks will handle - * the balancing. But if we are the last RT task - * we may need to handle the pulling of RT tasks - * now. 
- */ - if (!rq->rt.rt_nr_running) - pull_rt_task(rq); + return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd, + idle, all_pinned, this_best_prio, &rt_rq_iterator); } -#endif /* CONFIG_SMP */ -/* - * When switching a task to RT, we may overload the runqueue - * with RT tasks. In this case we try to push them off to - * other runqueues. - */ -static void switched_to_rt(struct rq *rq, struct task_struct *p, - int running) -{ - int check_resched = 1; - - /* - * If we are already running, then there's nothing - * that needs to be done. But if we are not running - * we may need to preempt the current running task. - * If that current running task is also an RT task - * then see if we can move to another run queue. - */ - if (!running) { -#ifdef CONFIG_SMP - if (rq->rt.overloaded && push_rt_task(rq) && - /* Don't resched if we changed runqueues */ - rq != task_rq(p)) - check_resched = 0; -#endif /* CONFIG_SMP */ - if (check_resched && p->prio < rq->curr->prio) - resched_task(rq->curr); - } -} - -/* - * Priority of the task has changed. This may cause - * us to initiate a push or pull. - */ -static void prio_changed_rt(struct rq *rq, struct task_struct *p, - int oldprio, int running) -{ - if (running) { -#ifdef CONFIG_SMP - /* - * If our priority decreases while running, we - * may need to pull tasks to this runqueue. - */ - if (oldprio < p->prio) - pull_rt_task(rq); - /* - * If there's a higher priority task waiting to run - * then reschedule. - */ - if (p->prio > rq->rt.highest_prio) - resched_task(p); -#else - /* For UP simply resched on drop of prio */ - if (oldprio < p->prio) - resched_task(p); -#endif /* CONFIG_SMP */ - } else { - /* - * This task is not running, but if it is - * greater than the current running task - * then reschedule. - */ - if (p->prio < rq->curr->prio) - resched_task(rq->curr); - } -} - -static void watchdog(struct rq *rq, struct task_struct *p) +static int +move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, + struct sched_domain *sd, enum cpu_idle_type idle) { - unsigned long soft, hard; - - if (!p->signal) - return; - - soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur; - hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max; + struct rq_iterator rt_rq_iterator; - if (soft != RLIM_INFINITY) { - unsigned long next; + rt_rq_iterator.start = load_balance_start_rt; + rt_rq_iterator.next = load_balance_next_rt; + rt_rq_iterator.arg = busiest; - p->rt.timeout++; - next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ); - if (p->rt.timeout > next) - p->it_sched_expires = p->se.sum_exec_runtime; - } + return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle, + &rt_rq_iterator); } +#endif -static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) +static void task_tick_rt(struct rq *rq, struct task_struct *p) { update_curr_rt(rq); - watchdog(rq, p); - /* * RR tasks need a special form of timeslice management. * FIFO tasks have no timeslices. 
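load_balance_rt() and move_one_task_rt() above hand the RT run list to the generic balancer through a small start/next/arg callback bundle, so the balancer never needs to know the queue layout. The shape of that interface, sketched with hypothetical names:

#include <stddef.h>

struct iterator {
        void *(*start)(void *arg);      /* first element, or NULL if empty */
        void *(*next)(void *arg);       /* subsequent elements, or NULL at the end */
        void *arg;                      /* opaque state, e.g. the source queue */
};

/* A generic consumer only sees the callbacks, never the queue internals. */
static int count_items(struct iterator *it)
{
        int n = 0;
        void *p;

        for (p = it->start(it->arg); p; p = it->next(it->arg))
                n++;
        return n;
}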
@@ -1138,16 +217,16 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) if (p->policy != SCHED_RR) return; - if (--p->rt.time_slice) + if (--p->time_slice) return; - p->rt.time_slice = DEF_TIMESLICE; + p->time_slice = DEF_TIMESLICE; /* * Requeue to the end of queue if we are not the only element * on the queue: */ - if (p->rt.run_list.prev != p->rt.run_list.next) { + if (p->run_list.prev != p->run_list.next) { requeue_task_rt(rq, p); set_tsk_need_resched(p); } @@ -1165,9 +244,6 @@ const struct sched_class rt_sched_class = { .enqueue_task = enqueue_task_rt, .dequeue_task = dequeue_task_rt, .yield_task = yield_task_rt, -#ifdef CONFIG_SMP - .select_task_rq = select_task_rq_rt, -#endif /* CONFIG_SMP */ .check_preempt_curr = check_preempt_curr_rt, @@ -1177,18 +253,8 @@ const struct sched_class rt_sched_class = { #ifdef CONFIG_SMP .load_balance = load_balance_rt, .move_one_task = move_one_task_rt, - .set_cpus_allowed = set_cpus_allowed_rt, - .join_domain = join_domain_rt, - .leave_domain = leave_domain_rt, - .pre_schedule = pre_schedule_rt, - .post_schedule = post_schedule_rt, - .task_wake_up = task_wake_up_rt, - .switched_from = switched_from_rt, #endif .set_curr_task = set_curr_task_rt, .task_tick = task_tick_rt, - - .prio_changed = prio_changed_rt, - .switched_to = switched_to_rt, }; diff --git a/trunk/kernel/softlockup.c b/trunk/kernel/softlockup.c index c1d76552446e..11df812263c8 100644 --- a/trunk/kernel/softlockup.c +++ b/trunk/kernel/softlockup.c @@ -8,7 +8,6 @@ */ #include #include -#include #include #include #include @@ -24,8 +23,8 @@ static DEFINE_PER_CPU(unsigned long, touch_timestamp); static DEFINE_PER_CPU(unsigned long, print_timestamp); static DEFINE_PER_CPU(struct task_struct *, watchdog_task); -static int __read_mostly did_panic; -unsigned long __read_mostly softlockup_thresh = 60; +static int did_panic; +int softlockup_thresh = 10; static int softlock_panic(struct notifier_block *this, unsigned long event, void *ptr) @@ -46,7 +45,7 @@ static struct notifier_block panic_block = { */ static unsigned long get_timestamp(int this_cpu) { - return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */ + return cpu_clock(this_cpu) >> 30; /* 2^30 ~= 10^9 */ } void touch_softlockup_watchdog(void) @@ -101,7 +100,11 @@ void softlockup_tick(void) now = get_timestamp(this_cpu); - /* Warn about unreasonable delays: */ + /* Wake up the high-prio watchdog task every second: */ + if (now > (touch_timestamp + 1)) + wake_up_process(per_cpu(watchdog_task, this_cpu)); + + /* Warn about unreasonable 10+ seconds delays: */ if (now <= (touch_timestamp + softlockup_thresh)) return; @@ -118,94 +121,12 @@ void softlockup_tick(void) spin_unlock(&print_lock); } -/* - * Have a reasonable limit on the number of tasks checked: - */ -unsigned long __read_mostly sysctl_hung_task_check_count = 1024; - -/* - * Zero means infinite timeout - no checking done: - */ -unsigned long __read_mostly sysctl_hung_task_timeout_secs = 120; - -unsigned long __read_mostly sysctl_hung_task_warnings = 10; - -/* - * Only do the hung-tasks check on one CPU: - */ -static int check_cpu __read_mostly = -1; - -static void check_hung_task(struct task_struct *t, unsigned long now) -{ - unsigned long switch_count = t->nvcsw + t->nivcsw; - - if (t->flags & PF_FROZEN) - return; - - if (switch_count != t->last_switch_count || !t->last_switch_timestamp) { - t->last_switch_count = switch_count; - t->last_switch_timestamp = now; - return; - } - if ((long)(now - t->last_switch_timestamp) < - sysctl_hung_task_timeout_secs) - 
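get_timestamp() above turns a nanosecond clock into rough seconds with a right shift rather than a 64-bit division, trading about 7% accuracy for cheapness (2^30 = 1073741824 versus 10^9). A quick standalone check of the error:

#include <stdio.h>

int main(void)
{
        unsigned long long ns = 60ULL * 1000000000ULL;  /* 60 s worth of nanoseconds */

        printf("exact: %llu s, shifted: %llu s\n",
               ns / 1000000000ULL, ns >> 30);
        /* prints "exact: 60 s, shifted: 55 s" */
        return 0;
}

For a watchdog threshold measured in tens of seconds, the underestimate is harmless and avoids a 64-bit divide on every tick.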
return; - if (sysctl_hung_task_warnings < 0) - return; - sysctl_hung_task_warnings--; - - /* - * Ok, the task did not get scheduled for more than 2 minutes, - * complain: - */ - printk(KERN_ERR "INFO: task %s:%d blocked for more than " - "%ld seconds.\n", t->comm, t->pid, - sysctl_hung_task_timeout_secs); - printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" - " disables this message.\n"); - sched_show_task(t); - __debug_show_held_locks(t); - - t->last_switch_timestamp = now; - touch_nmi_watchdog(); -} - -/* - * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for - * a really long time (120 seconds). If that happens, print out - * a warning. - */ -static void check_hung_uninterruptible_tasks(int this_cpu) -{ - int max_count = sysctl_hung_task_check_count; - unsigned long now = get_timestamp(this_cpu); - struct task_struct *g, *t; - - /* - * If the system crashed already then all bets are off, - * do not report extra hung tasks: - */ - if ((tainted & TAINT_DIE) || did_panic) - return; - - read_lock(&tasklist_lock); - do_each_thread(g, t) { - if (!--max_count) - break; - if (t->state & TASK_UNINTERRUPTIBLE) - check_hung_task(t, now); - } while_each_thread(g, t); - - read_unlock(&tasklist_lock); -} - /* * The watchdog thread - runs every second and touches the timestamp. */ static int watchdog(void *__bind_cpu) { struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; - int this_cpu = (long)__bind_cpu; sched_setscheduler(current, SCHED_FIFO, ¶m); @@ -214,18 +135,13 @@ static int watchdog(void *__bind_cpu) /* * Run briefly once per second to reset the softlockup timestamp. - * If this gets delayed for more than 60 seconds then the + * If this gets delayed for more than 10 seconds then the * debug-printout triggers in softlockup_tick(). */ while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); touch_softlockup_watchdog(); - msleep_interruptible(10000); - - if (this_cpu != check_cpu) - continue; - - if (sysctl_hung_task_timeout_secs) - check_hung_uninterruptible_tasks(this_cpu); + schedule(); } return 0; @@ -255,7 +171,6 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: - check_cpu = any_online_cpu(cpu_online_map); wake_up_process(per_cpu(watchdog_task, hotcpu)); break; #ifdef CONFIG_HOTPLUG_CPU @@ -266,15 +181,6 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) /* Unbind so it can run. Fall thru. */ kthread_bind(per_cpu(watchdog_task, hotcpu), any_online_cpu(cpu_online_map)); - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - if (hotcpu == check_cpu) { - cpumask_t temp_cpu_online_map = cpu_online_map; - - cpu_clear(hotcpu, temp_cpu_online_map); - check_cpu = any_online_cpu(temp_cpu_online_map); - } - break; case CPU_DEAD: case CPU_DEAD_FROZEN: p = per_cpu(watchdog_task, hotcpu); diff --git a/trunk/kernel/stop_machine.c b/trunk/kernel/stop_machine.c index 51b5ee53571a..319821ef78af 100644 --- a/trunk/kernel/stop_machine.c +++ b/trunk/kernel/stop_machine.c @@ -203,13 +203,13 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu) int ret; /* No CPUs can come up or down during this. 
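Taken together, the softlockup pieces above reduce to a timestamp that a per-CPU watchdog thread refreshes about once a second and a tick-time check that complains when the timestamp goes stale for longer than softlockup_thresh. A self-contained userspace sketch of that touch/check pattern, with CLOCK_MONOTONIC standing in for cpu_clock() (names here are illustrative):

#include <stdio.h>
#include <time.h>

static time_t touch_timestamp;                  /* call touch_watchdog() once at startup */
static const time_t softlockup_thresh = 10;     /* seconds */

static time_t get_timestamp(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec;
}

static void touch_watchdog(void)                /* the "watchdog thread" side */
{
        touch_timestamp = get_timestamp();
}

static void watchdog_tick(void)                 /* the "timer tick" side */
{
        time_t now = get_timestamp();

        if (now > touch_timestamp + softlockup_thresh)
                fprintf(stderr, "BUG: soft lockup detected, stuck for %lds!\n",
                        (long)(now - touch_timestamp));
}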
*/ - get_online_cpus(); + lock_cpu_hotplug(); p = __stop_machine_run(fn, data, cpu); if (!IS_ERR(p)) ret = kthread_stop(p); else ret = PTR_ERR(p); - put_online_cpus(); + unlock_cpu_hotplug(); return ret; } diff --git a/trunk/kernel/sysctl.c b/trunk/kernel/sysctl.c index 8e96558cb8f3..c68f68dcc605 100644 --- a/trunk/kernel/sysctl.c +++ b/trunk/kernel/sysctl.c @@ -81,7 +81,6 @@ extern int compat_log; extern int maps_protect; extern int sysctl_stat_interval; extern int audit_argv_kb; -extern int latencytop_enabled; /* Constants used for minimum and maximum */ #ifdef CONFIG_DETECT_SOFTLOCKUP @@ -307,43 +306,9 @@ static struct ctl_table kern_table[] = { .procname = "sched_nr_migrate", .data = &sysctl_sched_nr_migrate, .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = CTL_UNNUMBERED, - .procname = "sched_rt_period_ms", - .data = &sysctl_sched_rt_period, - .maxlen = sizeof(unsigned int), - .mode = 0644, + .mode = 644, .proc_handler = &proc_dointvec, }, - { - .ctl_name = CTL_UNNUMBERED, - .procname = "sched_rt_ratio", - .data = &sysctl_sched_rt_ratio, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, -#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP) - { - .ctl_name = CTL_UNNUMBERED, - .procname = "sched_min_bal_int_shares", - .data = &sysctl_sched_min_bal_int_shares, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = CTL_UNNUMBERED, - .procname = "sched_max_bal_int_shares", - .data = &sysctl_sched_max_bal_int_shares, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, -#endif #endif { .ctl_name = CTL_UNNUMBERED, @@ -417,15 +382,6 @@ static struct ctl_table kern_table[] = { .proc_handler = &proc_dointvec_taint, }, #endif -#ifdef CONFIG_LATENCYTOP - { - .procname = "latencytop", - .data = &latencytop_enabled, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, -#endif #ifdef CONFIG_SECURITY_CAPABILITIES { .procname = "cap-bound", @@ -772,40 +728,13 @@ static struct ctl_table kern_table[] = { .ctl_name = CTL_UNNUMBERED, .procname = "softlockup_thresh", .data = &softlockup_thresh, - .maxlen = sizeof(unsigned long), + .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = &proc_dointvec_minmax, .strategy = &sysctl_intvec, .extra1 = &one, .extra2 = &sixty, }, - { - .ctl_name = CTL_UNNUMBERED, - .procname = "hung_task_check_count", - .data = &sysctl_hung_task_check_count, - .maxlen = sizeof(unsigned long), - .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, - .strategy = &sysctl_intvec, - }, - { - .ctl_name = CTL_UNNUMBERED, - .procname = "hung_task_timeout_secs", - .data = &sysctl_hung_task_timeout_secs, - .maxlen = sizeof(unsigned long), - .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, - .strategy = &sysctl_intvec, - }, - { - .ctl_name = CTL_UNNUMBERED, - .procname = "hung_task_warnings", - .data = &sysctl_hung_task_warnings, - .maxlen = sizeof(unsigned long), - .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, - .strategy = &sysctl_intvec, - }, #endif #ifdef CONFIG_COMPAT { diff --git a/trunk/kernel/time/tick-sched.c b/trunk/kernel/time/tick-sched.c index 1a21b6fdb674..cb89fa8db110 100644 --- a/trunk/kernel/time/tick-sched.c +++ b/trunk/kernel/time/tick-sched.c @@ -153,7 +153,6 @@ void tick_nohz_update_jiffies(void) void tick_nohz_stop_sched_tick(void) { unsigned long seq, last_jiffies, next_jiffies, 
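One nit on the sched_nr_migrate entry above: ctl_table .mode values are permission bits and are conventionally written in octal, and the entry ends up with .mode = 644 (decimal) while its neighbours use 0644. The two are different numbers (decimal 644 is octal 01204), so 0644 is presumably what is meant. A quick standalone check:

#include <stdio.h>

int main(void)
{
        printf("0644 = %d decimal (rw-r--r--)\n", 0644);        /* 420 */
        printf(" 644 = 0%o octal\n", 644);                      /* 01204 */
        return 0;
}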
delta_jiffies, flags; - unsigned long rt_jiffies; struct tick_sched *ts; ktime_t last_update, expires, now, delta; struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; @@ -217,10 +216,6 @@ void tick_nohz_stop_sched_tick(void) next_jiffies = get_next_timer_interrupt(last_jiffies); delta_jiffies = next_jiffies - last_jiffies; - rt_jiffies = rt_needs_cpu(cpu); - if (rt_jiffies && rt_jiffies < delta_jiffies) - delta_jiffies = rt_jiffies; - if (rcu_needs_cpu(cpu)) delta_jiffies = 1; /* @@ -514,6 +509,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) { struct tick_sched *ts = container_of(timer, struct tick_sched, sched_timer); + struct hrtimer_cpu_base *base = timer->base->cpu_base; struct pt_regs *regs = get_irq_regs(); ktime_t now = ktime_get(); int cpu = smp_processor_id(); @@ -551,8 +547,15 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) touch_softlockup_watchdog(); ts->idle_jiffies++; } + /* + * update_process_times() might take tasklist_lock, hence + * drop the base lock. sched-tick hrtimers are per-CPU and + * never accessible by userspace APIs, so this is safe to do. + */ + spin_unlock(&base->lock); update_process_times(user_mode(regs)); profile_tick(CPU_PROFILING); + spin_lock(&base->lock); } /* Do not restart, when we are in the idle loop */ diff --git a/trunk/kernel/timer.c b/trunk/kernel/timer.c index f739dfb539ce..2a00c22203f3 100644 --- a/trunk/kernel/timer.c +++ b/trunk/kernel/timer.c @@ -896,7 +896,7 @@ static void run_timer_softirq(struct softirq_action *h) { tvec_base_t *base = __get_cpu_var(tvec_bases); - hrtimer_run_pending(); + hrtimer_run_queues(); if (time_after_eq(jiffies, base->timer_jiffies)) __run_timers(base); @@ -907,7 +907,6 @@ static void run_timer_softirq(struct softirq_action *h) */ void run_local_timers(void) { - hrtimer_run_queues(); raise_softirq(TIMER_SOFTIRQ); softlockup_tick(); } diff --git a/trunk/kernel/user.c b/trunk/kernel/user.c index bc1c48d35cb3..ab4fd706993b 100644 --- a/trunk/kernel/user.c +++ b/trunk/kernel/user.c @@ -319,7 +319,7 @@ void free_uid(struct user_struct *up) struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid) { struct hlist_head *hashent = uidhashentry(ns, uid); - struct user_struct *up, *new; + struct user_struct *up; /* Make uid_hash_find() + uids_user_create() + uid_hash_insert() * atomic. 
@@ -331,9 +331,13 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid) spin_unlock_irq(&uidhash_lock); if (!up) { + struct user_struct *new; + new = kmem_cache_alloc(uid_cachep, GFP_KERNEL); - if (!new) - goto out_unlock; + if (!new) { + uids_mutex_unlock(); + return NULL; + } new->uid = uid; atomic_set(&new->__count, 1); @@ -349,14 +353,28 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid) #endif new->locked_shm = 0; - if (alloc_uid_keyring(new, current) < 0) - goto out_free_user; + if (alloc_uid_keyring(new, current) < 0) { + kmem_cache_free(uid_cachep, new); + uids_mutex_unlock(); + return NULL; + } - if (sched_create_user(new) < 0) - goto out_put_keys; + if (sched_create_user(new) < 0) { + key_put(new->uid_keyring); + key_put(new->session_keyring); + kmem_cache_free(uid_cachep, new); + uids_mutex_unlock(); + return NULL; + } - if (uids_user_create(new)) - goto out_destoy_sched; + if (uids_user_create(new)) { + sched_destroy_user(new); + key_put(new->uid_keyring); + key_put(new->session_keyring); + kmem_cache_free(uid_cachep, new); + uids_mutex_unlock(); + return NULL; + } /* * Before adding this, check whether we raced @@ -384,17 +402,6 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid) uids_mutex_unlock(); return up; - -out_destoy_sched: - sched_destroy_user(new); -out_put_keys: - key_put(new->uid_keyring); - key_put(new->session_keyring); -out_free_user: - kmem_cache_free(uid_cachep, new); -out_unlock: - uids_mutex_unlock(); - return NULL; } void switch_uid(struct user_struct *new_user) diff --git a/trunk/kernel/workqueue.c b/trunk/kernel/workqueue.c index 52db48e7f6e7..8db0b597509e 100644 --- a/trunk/kernel/workqueue.c +++ b/trunk/kernel/workqueue.c @@ -67,8 +67,9 @@ struct workqueue_struct { #endif }; -/* Serializes the accesses to the list of workqueues. */ -static DEFINE_SPINLOCK(workqueue_lock); +/* All the per-cpu workqueues on the system, for hotplug cpu to add/remove + threads to each one as cpus come/go. */ +static DEFINE_MUTEX(workqueue_mutex); static LIST_HEAD(workqueues); static int singlethread_cpu __read_mostly; @@ -591,6 +592,8 @@ EXPORT_SYMBOL(schedule_delayed_work_on); * Returns zero on success. * Returns -ve errno on failure. * + * Appears to be racy against CPU hotplug. + * * schedule_on_each_cpu() is very slow. 
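The alloc_uid() hunk above trades the goto-based unwind ladder (out_destoy_sched/out_put_keys/out_free_user/out_unlock) for cleanup duplicated at each failure point. For reference, the ladder idiom being removed looks like this in isolation (hypothetical resources, sketch only): each label undoes exactly the steps that succeeded before the failing one.

#include <stdlib.h>

struct thing { void *a, *b; };

static struct thing *make_thing(void)
{
        struct thing *t = malloc(sizeof(*t));

        if (!t)
                goto out;
        t->a = malloc(32);
        if (!t->a)
                goto out_free_t;
        t->b = malloc(64);
        if (!t->b)
                goto out_free_a;
        return t;

out_free_a:
        free(t->a);
out_free_t:
        free(t);
out:
        return NULL;
}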
*/ int schedule_on_each_cpu(work_func_t func) @@ -602,7 +605,7 @@ int schedule_on_each_cpu(work_func_t func) if (!works) return -ENOMEM; - get_online_cpus(); + preempt_disable(); /* CPU hotplug */ for_each_online_cpu(cpu) { struct work_struct *work = per_cpu_ptr(works, cpu); @@ -610,8 +613,8 @@ int schedule_on_each_cpu(work_func_t func) set_bit(WORK_STRUCT_PENDING, work_data_bits(work)); __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu), work); } + preempt_enable(); flush_workqueue(keventd_wq); - put_online_cpus(); free_percpu(works); return 0; } @@ -747,10 +750,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name, err = create_workqueue_thread(cwq, singlethread_cpu); start_workqueue_thread(cwq, -1); } else { - get_online_cpus(); - spin_lock(&workqueue_lock); + mutex_lock(&workqueue_mutex); list_add(&wq->list, &workqueues); - spin_unlock(&workqueue_lock); for_each_possible_cpu(cpu) { cwq = init_cpu_workqueue(wq, cpu); @@ -759,7 +760,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, err = create_workqueue_thread(cwq, cpu); start_workqueue_thread(cwq, cpu); } - put_online_cpus(); + mutex_unlock(&workqueue_mutex); } if (err) { @@ -774,7 +775,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) { /* * Our caller is either destroy_workqueue() or CPU_DEAD, - * get_online_cpus() protects cwq->thread. + * workqueue_mutex protects cwq->thread */ if (cwq->thread == NULL) return; @@ -809,11 +810,9 @@ void destroy_workqueue(struct workqueue_struct *wq) struct cpu_workqueue_struct *cwq; int cpu; - get_online_cpus(); - spin_lock(&workqueue_lock); + mutex_lock(&workqueue_mutex); list_del(&wq->list); - spin_unlock(&workqueue_lock); - put_online_cpus(); + mutex_unlock(&workqueue_mutex); for_each_cpu_mask(cpu, *cpu_map) { cwq = per_cpu_ptr(wq->cpu_wq, cpu); @@ -836,6 +835,13 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, action &= ~CPU_TASKS_FROZEN; switch (action) { + case CPU_LOCK_ACQUIRE: + mutex_lock(&workqueue_mutex); + return NOTIFY_OK; + + case CPU_LOCK_RELEASE: + mutex_unlock(&workqueue_mutex); + return NOTIFY_OK; case CPU_UP_PREPARE: cpu_set(cpu, cpu_populated_map); @@ -848,8 +854,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, case CPU_UP_PREPARE: if (!create_workqueue_thread(cwq, cpu)) break; - printk(KERN_ERR "workqueue [%s] for %i failed\n", - wq->name, cpu); + printk(KERN_ERR "workqueue for %i failed\n", cpu); return NOTIFY_BAD; case CPU_ONLINE: diff --git a/trunk/lib/Kconfig.debug b/trunk/lib/Kconfig.debug index 14fb355e3caa..a60109307d32 100644 --- a/trunk/lib/Kconfig.debug +++ b/trunk/lib/Kconfig.debug @@ -517,18 +517,4 @@ config FAULT_INJECTION_STACKTRACE_FILTER help Provide stacktrace filter for fault-injection capabilities -config LATENCYTOP - bool "Latency measuring infrastructure" - select FRAME_POINTER if !MIPS - select KALLSYMS - select KALLSYMS_ALL - select STACKTRACE - select SCHEDSTATS - select SCHED_DEBUG - depends on X86 || X86_64 - help - Enable this option if you want to use the LatencyTOP tool - to find out which userspace is blocking on what kernel operations. 
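The workqueue hotplug callback above (and the slab one further down) bracket per-CPU setup and teardown with CPU_LOCK_ACQUIRE/CPU_LOCK_RELEASE events that simply take and drop a subsystem-wide mutex around the whole notification. A stripped-down sketch of that notifier shape, with made-up event names and pthreads in place of the kernel mutex:

#include <pthread.h>

enum hotplug_event { LOCK_ACQUIRE, UP_PREPARE, ONLINE, LOCK_RELEASE };

static pthread_mutex_t subsys_mutex = PTHREAD_MUTEX_INITIALIZER;

static int hotplug_callback(enum hotplug_event event, int cpu)
{
        switch (event) {
        case LOCK_ACQUIRE:
                pthread_mutex_lock(&subsys_mutex);      /* held across the whole event */
                break;
        case UP_PREPARE:
                /* allocate per-cpu state for 'cpu', under subsys_mutex */
                break;
        case ONLINE:
                /* start using the new cpu */
                break;
        case LOCK_RELEASE:
                pthread_mutex_unlock(&subsys_mutex);
                break;
        }
        return 0;
}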
- - source "samples/Kconfig" diff --git a/trunk/lib/kernel_lock.c b/trunk/lib/kernel_lock.c index 812dbf00844b..f73e2f8c308f 100644 --- a/trunk/lib/kernel_lock.c +++ b/trunk/lib/kernel_lock.c @@ -9,6 +9,7 @@ #include #include +#ifdef CONFIG_PREEMPT_BKL /* * The 'big kernel semaphore' * @@ -85,6 +86,128 @@ void __lockfunc unlock_kernel(void) up(&kernel_sem); } +#else + +/* + * The 'big kernel lock' + * + * This spinlock is taken and released recursively by lock_kernel() + * and unlock_kernel(). It is transparently dropped and reacquired + * over schedule(). It is used to protect legacy code that hasn't + * been migrated to a proper locking design yet. + * + * Don't use in new code. + */ +static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); + + +/* + * Acquire/release the underlying lock from the scheduler. + * + * This is called with preemption disabled, and should + * return an error value if it cannot get the lock and + * TIF_NEED_RESCHED gets set. + * + * If it successfully gets the lock, it should increment + * the preemption count like any spinlock does. + * + * (This works on UP too - _raw_spin_trylock will never + * return false in that case) + */ +int __lockfunc __reacquire_kernel_lock(void) +{ + while (!_raw_spin_trylock(&kernel_flag)) { + if (test_thread_flag(TIF_NEED_RESCHED)) + return -EAGAIN; + cpu_relax(); + } + preempt_disable(); + return 0; +} + +void __lockfunc __release_kernel_lock(void) +{ + _raw_spin_unlock(&kernel_flag); + preempt_enable_no_resched(); +} + +/* + * These are the BKL spinlocks - we try to be polite about preemption. + * If SMP is not on (ie UP preemption), this all goes away because the + * _raw_spin_trylock() will always succeed. + */ +#ifdef CONFIG_PREEMPT +static inline void __lock_kernel(void) +{ + preempt_disable(); + if (unlikely(!_raw_spin_trylock(&kernel_flag))) { + /* + * If preemption was disabled even before this + * was called, there's nothing we can be polite + * about - just spin. + */ + if (preempt_count() > 1) { + _raw_spin_lock(&kernel_flag); + return; + } + + /* + * Otherwise, let's wait for the kernel lock + * with preemption enabled.. + */ + do { + preempt_enable(); + while (spin_is_locked(&kernel_flag)) + cpu_relax(); + preempt_disable(); + } while (!_raw_spin_trylock(&kernel_flag)); + } +} + +#else + +/* + * Non-preemption case - just get the spinlock + */ +static inline void __lock_kernel(void) +{ + _raw_spin_lock(&kernel_flag); +} +#endif + +static inline void __unlock_kernel(void) +{ + /* + * the BKL is not covered by lockdep, so we open-code the + * unlocking sequence (and thus avoid the dep-chain ops): + */ + _raw_spin_unlock(&kernel_flag); + preempt_enable(); +} + +/* + * Getting the big kernel lock. + * + * This cannot happen asynchronously, so we only need to + * worry about other CPU's. + */ +void __lockfunc lock_kernel(void) +{ + int depth = current->lock_depth+1; + if (likely(!depth)) + __lock_kernel(); + current->lock_depth = depth; +} + +void __lockfunc unlock_kernel(void) +{ + BUG_ON(current->lock_depth < 0); + if (likely(--current->lock_depth < 0)) + __unlock_kernel(); +} + +#endif + EXPORT_SYMBOL(lock_kernel); EXPORT_SYMBOL(unlock_kernel); diff --git a/trunk/mm/oom_kill.c b/trunk/mm/oom_kill.c index 96473b482099..91a081a82f55 100644 --- a/trunk/mm/oom_kill.c +++ b/trunk/mm/oom_kill.c @@ -286,7 +286,7 @@ static void __oom_kill_task(struct task_struct *p, int verbose) * all the memory it needs. That way it should be able to * exit() and clear out its resources quickly... 
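lock_kernel()/unlock_kernel() above make the BKL recursive by keeping a per-task lock_depth and only touching the real lock when the depth crosses zero in either direction. A userspace sketch of the same depth-counting trick, assuming pthreads and C11 _Thread_local (a thread-local variable standing in for current->lock_depth):

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static _Thread_local int lock_depth = -1;       /* -1 == not held by this thread */

static void lock_big(void)
{
        int depth = lock_depth + 1;

        if (depth == 0)                         /* outermost acquisition only */
                pthread_mutex_lock(&big_lock);
        lock_depth = depth;
}

static void unlock_big(void)
{
        assert(lock_depth >= 0);
        if (--lock_depth < 0)                   /* dropped the outermost level */
                pthread_mutex_unlock(&big_lock);
}

Nested lock_big()/unlock_big() pairs only bump the counter; the underlying mutex is taken once, which is what lets code that already "holds the BKL" call into other BKL-taking code without deadlocking.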
*/ - p->rt.time_slice = HZ; + p->time_slice = HZ; set_tsk_thread_flag(p, TIF_MEMDIE); force_sig(SIGKILL, p); diff --git a/trunk/mm/slab.c b/trunk/mm/slab.c index 40c00dacbe4b..ff31261fd24f 100644 --- a/trunk/mm/slab.c +++ b/trunk/mm/slab.c @@ -730,7 +730,8 @@ static inline void init_lock_keys(void) #endif /* - * Guard access to the cache-chain. + * 1. Guard access to the cache-chain. + * 2. Protect sanity of cpu_online_map against cpu hotplug events */ static DEFINE_MUTEX(cache_chain_mutex); static struct list_head cache_chain; @@ -1330,11 +1331,12 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, int err = 0; switch (action) { + case CPU_LOCK_ACQUIRE: + mutex_lock(&cache_chain_mutex); + break; case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: - mutex_lock(&cache_chain_mutex); err = cpuup_prepare(cpu); - mutex_unlock(&cache_chain_mutex); break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: @@ -1371,8 +1373,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb, #endif case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: - mutex_lock(&cache_chain_mutex); cpuup_canceled(cpu); + break; + case CPU_LOCK_RELEASE: mutex_unlock(&cache_chain_mutex); break; } @@ -2167,7 +2170,6 @@ kmem_cache_create (const char *name, size_t size, size_t align, * We use cache_chain_mutex to ensure a consistent view of * cpu_online_map as well. Please see cpuup_callback */ - get_online_cpus(); mutex_lock(&cache_chain_mutex); list_for_each_entry(pc, &cache_chain, next) { @@ -2394,7 +2396,6 @@ kmem_cache_create (const char *name, size_t size, size_t align, panic("kmem_cache_create(): failed to create slab `%s'\n", name); mutex_unlock(&cache_chain_mutex); - put_online_cpus(); return cachep; } EXPORT_SYMBOL(kmem_cache_create); @@ -2546,11 +2547,9 @@ int kmem_cache_shrink(struct kmem_cache *cachep) int ret; BUG_ON(!cachep || in_interrupt()); - get_online_cpus(); mutex_lock(&cache_chain_mutex); ret = __cache_shrink(cachep); mutex_unlock(&cache_chain_mutex); - put_online_cpus(); return ret; } EXPORT_SYMBOL(kmem_cache_shrink); @@ -2576,7 +2575,6 @@ void kmem_cache_destroy(struct kmem_cache *cachep) BUG_ON(!cachep || in_interrupt()); /* Find the cache in the chain of caches. */ - get_online_cpus(); mutex_lock(&cache_chain_mutex); /* * the chain is never empty, cache_cache is never destroyed @@ -2586,7 +2584,6 @@ void kmem_cache_destroy(struct kmem_cache *cachep) slab_error(cachep, "Can't free all objects"); list_add(&cachep->next, &cache_chain); mutex_unlock(&cache_chain_mutex); - put_online_cpus(); return; } @@ -2595,7 +2592,6 @@ void kmem_cache_destroy(struct kmem_cache *cachep) __kmem_cache_destroy(cachep); mutex_unlock(&cache_chain_mutex); - put_online_cpus(); } EXPORT_SYMBOL(kmem_cache_destroy); diff --git a/trunk/net/core/flow.c b/trunk/net/core/flow.c index 6489f4e24ecf..3ed2b4b1d6d4 100644 --- a/trunk/net/core/flow.c +++ b/trunk/net/core/flow.c @@ -293,7 +293,7 @@ void flow_cache_flush(void) static DEFINE_MUTEX(flow_flush_sem); /* Don't want cpus going down or up during this. */ - get_online_cpus(); + lock_cpu_hotplug(); mutex_lock(&flow_flush_sem); atomic_set(&info.cpuleft, num_online_cpus()); init_completion(&info.completion); @@ -305,7 +305,7 @@ void flow_cache_flush(void) wait_for_completion(&info.completion); mutex_unlock(&flow_flush_sem); - put_online_cpus(); + unlock_cpu_hotplug(); } static void __devinit flow_cache_cpu_prepare(int cpu)
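flow_cache_flush() above fans the flush out to every online CPU and then waits on a completion that the last per-CPU handler signals, seeded with num_online_cpus(). The same countdown-and-wait pattern in plain pthreads (a sketch, not the kernel's atomic_t/completion primitives; info->lock and info->done must be initialised with pthread_mutex_init()/pthread_cond_init()):

#include <pthread.h>

struct flush_info {
        int left;                       /* workers still running */
        pthread_mutex_t lock;
        pthread_cond_t done;
};

/* Each worker calls this when its share of the flush is finished. */
static void flush_done(struct flush_info *info)
{
        pthread_mutex_lock(&info->lock);
        if (--info->left == 0)
                pthread_cond_signal(&info->done);       /* last one completes */
        pthread_mutex_unlock(&info->lock);
}

/* The flusher seeds the count, kicks the workers, then waits. */
static void wait_for_flush(struct flush_info *info, int nr_workers)
{
        pthread_mutex_lock(&info->lock);
        info->left = nr_workers;
        pthread_mutex_unlock(&info->lock);

        /* ... dispatch the per-worker flush jobs here ... */

        pthread_mutex_lock(&info->lock);
        while (info->left > 0)
                pthread_cond_wait(&info->done, &info->lock);
        pthread_mutex_unlock(&info->lock);
}

Seeding the count before dispatching any work is the important ordering; otherwise an early finisher could signal completion while the count is still zero.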