From b39898cd4077f4b6ec706e717c938751c34e1dc4 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 6 Nov 2013 12:30:07 +0100
Subject: [PATCH 1/7] genirq: Prevent spurious detection for unconditionally polled interrupts

On a 68k platform a couple of interrupts are demultiplexed and "polled"
from a top level interrupt. Unfortunately there is no way to determine
which of the sub interrupts raised the top level interrupt, so all of
the demultiplexed interrupt handlers need to be invoked. Given a high
enough frequency this can trigger the spurious interrupt detection
mechanism, if one of the demultiplexed interrupts returns IRQ_NONE
continuously. But this is a false positive, as the polling causes this
behaviour and not buggy hardware/software.

Introduce IRQ_IS_POLLED, which can be set at interrupt chip setup time
via irq_set_status_flags(). The flag excludes the interrupt from the
spurious detector and from all core polling activities.

Reported-and-tested-by: Michael Schmitz
Cc: Geert Uytterhoeven
Cc: linux-m68k@vger.kernel.org
Link: http://lkml.kernel.org/r/alpine.DEB.2.02.1311061149250.23353@ionos.tec.linutronix.de
---
 include/linux/irq.h   |  7 ++++++-
 kernel/irq/settings.h |  7 +++++++
 kernel/irq/spurious.c | 12 +++++++++---
 3 files changed, 22 insertions(+), 4 deletions(-)

diff --git a/include/linux/irq.h b/include/linux/irq.h
index 56bb0dc8b7d44..7dc10036eff55 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -70,6 +70,9 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data);
 * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
 * IRQ_NESTED_TRHEAD - Interrupt nests into another thread
 * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
+ * IRQ_IS_POLLED - Always polled by another interrupt. Exclude
+ *                 it from the spurious interrupt detection
+ *                 mechanism and from core side polling.
 */
 enum {
 IRQ_TYPE_NONE = 0x00000000,
@@ -94,12 +97,14 @@ enum {
 IRQ_NESTED_THREAD = (1 << 15),
 IRQ_NOTHREAD = (1 << 16),
 IRQ_PER_CPU_DEVID = (1 << 17),
+ IRQ_IS_POLLED = (1 << 18),
 };

 #define IRQF_MODIFY_MASK \
 (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
- IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
+ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
+ IRQ_IS_POLLED)

 #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)

diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 1162f1030f18f..3320b84cc60f7 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -14,6 +14,7 @@ enum {
 _IRQ_NO_BALANCING = IRQ_NO_BALANCING,
 _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
 _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
+ _IRQ_IS_POLLED = IRQ_IS_POLLED,
 _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
 };

@@ -26,6 +27,7 @@ enum {
 #define IRQ_NOAUTOEN GOT_YOU_MORON
 #define IRQ_NESTED_THREAD GOT_YOU_MORON
 #define IRQ_PER_CPU_DEVID GOT_YOU_MORON
+#define IRQ_IS_POLLED GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK GOT_YOU_MORON

@@ -147,3 +149,8 @@ static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)
 {
 return desc->status_use_accessors & _IRQ_NESTED_THREAD;
 }
+
+static inline bool irq_settings_is_polled(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_IS_POLLED;
+}

diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 7b5f012bde9d7..a1d8cc63b56e5 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -67,8 +67,13 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 raw_spin_lock(&desc->lock);

- /* PER_CPU and nested thread interrupts are never polled */
- if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
+ /*
+ * PER_CPU, nested thread interrupts and interrupts explicitly
+ * marked polled are excluded from polling.
+ */
+ if (irq_settings_is_per_cpu(desc) ||
+     irq_settings_is_nested_thread(desc) ||
+     irq_settings_is_polled(desc))
 goto out;

 /*
@@ -268,7 +273,8 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
 void note_interrupt(unsigned int irq, struct irq_desc *desc,
 irqreturn_t action_ret)
 {
- if (desc->istate & IRQS_POLL_INPROGRESS)
+ if (desc->istate & IRQS_POLL_INPROGRESS ||
+     irq_settings_is_polled(desc))
 return;

 /* we get here again via the threaded handler */
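To make use of this on the platform side, the demultiplexing setup has to mark
its sub interrupts; a minimal sketch (not part of the patch, with invented IRQ
numbers, names and chip choice) could look like this:

```c
/*
 * Hypothetical demultiplexing platform using the new flag.  The IRQ
 * range, the handler and the init function are made up; only
 * irq_set_status_flags(), IRQ_IS_POLLED, generic_handle_irq(),
 * dummy_irq_chip and handle_simple_irq() are existing kernel interfaces.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#define DEMUX_IRQ_BASE	100	/* first demultiplexed IRQ (example value) */
#define DEMUX_NR_IRQS	4	/* number of sub interrupts (example value) */

/*
 * Top level handler, to be requested on the parent interrupt line with
 * request_irq(): the hardware gives no hint which sub interrupt fired,
 * so all of them are polled unconditionally.
 */
static irqreturn_t demux_top_handler(int irq, void *dev_id)
{
	int i;

	for (i = 0; i < DEMUX_NR_IRQS; i++)
		generic_handle_irq(DEMUX_IRQ_BASE + i);

	return IRQ_HANDLED;
}

static void __init demux_init_irqs(void)
{
	int i;

	for (i = 0; i < DEMUX_NR_IRQS; i++) {
		irq_set_chip_and_handler(DEMUX_IRQ_BASE + i, &dummy_irq_chip,
					 handle_simple_irq);
		/*
		 * The sub interrupts may return IRQ_NONE on every poll;
		 * keep them away from the spurious detector and from
		 * core side polling.
		 */
		irq_set_status_flags(DEMUX_IRQ_BASE + i, IRQ_IS_POLLED);
	}
}
```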
From 09f90f6685cd88b6b904c141035d096169958cc4 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Mon, 11 Nov 2013 21:01:03 +0100
Subject: [PATCH 2/7] m68k: Simplify low level interrupt handling code

The low level interrupt entry code of m68k contains the following:

    add_preempt_count(HARDIRQ_OFFSET);

    do_IRQ();
        irq_enter();
            add_preempt_count(HARDIRQ_OFFSET);
        handle_interrupt();
        irq_exit();
            sub_preempt_count(HARDIRQ_OFFSET);
            if (in_interrupt())
                return;        <---- On m68k always taken!
            if (local_softirq_pending())
                do_softirq();

    sub_preempt_count(HARDIRQ_OFFSET);

    if (in_hardirq())
        return;

    if (status_on_stack_has_interrupt_priority_mask > 0)
        return;

    if (local_softirq_pending())
        do_softirq();

ret_from_exception:
    if (interrupted_context_is_kernel)
        return:
    ....

I tried to find a proper explanation for this, but the changelog is
sparse and there are no mails explaining it further. But obviously this
relates to the interrupt priority levels of the m68k and tries to be
extra clever with nested interrupts. Though this cleverness just adds
code bloat to the interrupt hotpath.
For the common case of non-nested interrupts the code runs through two
extra conditionals to reach the only important one, which checks
whether the return is to kernel or user space.

For the nested case the checks for in_hardirq() and the priority mask
value on the stack catch only the case where the nested interrupt
happens inside the hard irq context of the first interrupt. If the
nested interrupt happens while the first interrupt handles soft
interrupts, then these extra checks buy nothing. The nested interrupt
will fall through to the final kernel/user space return check at
ret_from_exception.

Changing the code flow in the following way:

    do_IRQ();
        irq_enter();
            add_preempt_count(HARDIRQ_OFFSET);
        handle_interrupt();
        irq_exit();
            sub_preempt_count(HARDIRQ_OFFSET);
            if (in_interrupt())
                return;
            if (local_softirq_pending())
                do_softirq();

ret_from_exception:
    if (interrupted_context_is_kernel)
        return:

makes the region protected by the hardirq count slightly smaller and
softirq handling is invoked on a slightly deeper stack. But otherwise
it is completely functionally equivalent and saves 104 bytes of text in
arch/m68k/kernel/entry.o.

This modification further allows us to get rid of the limitations which
m68k puts on the preempt_count layout, so we can make the preempt count
bits completely generic.

Signed-off-by: Thomas Gleixner
Tested-by: Michael Schmitz
Acked-by: Geert Uytterhoeven
Cc: Linux/m68k
Cc: Andreas Schwab
Link: http://lkml.kernel.org/r/alpine.DEB.2.02.1311112052360.30673@ionos.tec.linutronix.de
---
 arch/m68k/kernel/entry.S         | 40 ++++----------------------
 arch/m68k/kernel/ints.c          |  6 -----
 arch/m68k/platform/68000/entry.S | 33 +++++++-------------------
 arch/m68k/platform/68360/entry.S | 24 +++----------------
 4 files changed, 15 insertions(+), 88 deletions(-)

diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index a78f5649e8dec..b54ac7aba850e 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -45,7 +45,7 @@
 .globl system_call, buserr, trap, resume
 .globl sys_call_table
 .globl __sys_fork, __sys_clone, __sys_vfork
-.globl ret_from_interrupt, bad_interrupt
+.globl bad_interrupt
 .globl auto_irqhandler_fixup
 .globl user_irqvec_fixup

@@ -275,8 +275,6 @@ do_delayed_trace:
 ENTRY(auto_inthandler)
 SAVE_ALL_INT
 GET_CURRENT(%d0)
- movel %d0,%a1
- addqb #1,%a1@(TINFO_PREEMPT+1)
 | put exception # in d0
 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
 subw #VEC_SPUR,%d0
@@ -286,32 +284,13 @@ ENTRY(auto_inthandler)
 auto_irqhandler_fixup = . + 2
 jsr do_IRQ | process the IRQ
 addql #8,%sp | pop parameters off stack
-
-ret_from_interrupt:
- movel %curptr@(TASK_STACK),%a1
- subqb #1,%a1@(TINFO_PREEMPT+1)
- jeq ret_from_last_interrupt
-2: RESTORE_ALL
-
- ALIGN
-ret_from_last_interrupt:
- moveq #(~ALLOWINT>>8)&0xff,%d0
- andb %sp@(PT_OFF_SR),%d0
- jne 2b
-
- /* check if we need to do software interrupts */
- tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
- jeq .Lret_from_exception
- pea ret_from_exception
- jra do_softirq
+ jra ret_from_exception

 /* Handler for user defined interrupt vectors */

 ENTRY(user_inthandler)
 SAVE_ALL_INT
 GET_CURRENT(%d0)
- movel %d0,%a1
- addqb #1,%a1@(TINFO_PREEMPT+1)
 | put exception # in d0
 bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
 user_irqvec_fixup = . + 2
@@ -321,29 +300,18 @@ user_irqvec_fixup = . 
+ 2 movel %d0,%sp@- | put vector # on stack jsr do_IRQ | process the IRQ addql #8,%sp | pop parameters off stack - - movel %curptr@(TASK_STACK),%a1 - subqb #1,%a1@(TINFO_PREEMPT+1) - jeq ret_from_last_interrupt - RESTORE_ALL + jra ret_from_exception /* Handler for uninitialized and spurious interrupts */ ENTRY(bad_inthandler) SAVE_ALL_INT GET_CURRENT(%d0) - movel %d0,%a1 - addqb #1,%a1@(TINFO_PREEMPT+1) movel %sp,%sp@- jsr handle_badint addql #4,%sp - - movel %curptr@(TASK_STACK),%a1 - subqb #1,%a1@(TINFO_PREEMPT+1) - jeq ret_from_last_interrupt - RESTORE_ALL - + jra ret_from_exception resume: /* diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c index 4d7da384eea03..077d3a70fed19 100644 --- a/arch/m68k/kernel/ints.c +++ b/arch/m68k/kernel/ints.c @@ -58,12 +58,6 @@ void __init init_IRQ(void) { int i; - /* assembly irq entry code relies on this... */ - if (HARDIRQ_MASK != 0x00ff0000) { - extern void hardirq_mask_is_broken(void); - hardirq_mask_is_broken(); - } - for (i = IRQ_AUTO_1; i <= IRQ_AUTO_7; i++) irq_set_chip_and_handler(i, &auto_irq_chip, handle_simple_irq); diff --git a/arch/m68k/platform/68000/entry.S b/arch/m68k/platform/68000/entry.S index 7f91c2fde5096..23ac054c6e1ad 100644 --- a/arch/m68k/platform/68000/entry.S +++ b/arch/m68k/platform/68000/entry.S @@ -27,7 +27,6 @@ .globl ret_from_exception .globl ret_from_signal .globl sys_call_table -.globl ret_from_interrupt .globl bad_interrupt .globl inthandler1 .globl inthandler2 @@ -137,7 +136,7 @@ inthandler1: movel #65,%sp@- /* put vector # on stack*/ jbsr process_int /* process the IRQ*/ 3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt + bra ret_from_exception inthandler2: SAVE_ALL_INT @@ -148,7 +147,7 @@ inthandler2: movel #66,%sp@- /* put vector # on stack*/ jbsr process_int /* process the IRQ*/ 3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt + bra ret_from_exception inthandler3: SAVE_ALL_INT @@ -159,7 +158,7 @@ inthandler3: movel #67,%sp@- /* put vector # on stack*/ jbsr process_int /* process the IRQ*/ 3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt + bra ret_from_exception inthandler4: SAVE_ALL_INT @@ -170,7 +169,7 @@ inthandler4: movel #68,%sp@- /* put vector # on stack*/ jbsr process_int /* process the IRQ*/ 3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt + bra ret_from_exception inthandler5: SAVE_ALL_INT @@ -181,7 +180,7 @@ inthandler5: movel #69,%sp@- /* put vector # on stack*/ jbsr process_int /* process the IRQ*/ 3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt + bra ret_from_exception inthandler6: SAVE_ALL_INT @@ -192,7 +191,7 @@ inthandler6: movel #70,%sp@- /* put vector # on stack*/ jbsr process_int /* process the IRQ*/ 3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt + bra ret_from_exception inthandler7: SAVE_ALL_INT @@ -203,7 +202,7 @@ inthandler7: movel #71,%sp@- /* put vector # on stack*/ jbsr process_int /* process the IRQ*/ 3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt + bra ret_from_exception inthandler: SAVE_ALL_INT @@ -214,23 +213,7 @@ inthandler: movel %d0,%sp@- /* put vector # on stack*/ jbsr process_int /* process the IRQ*/ 3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt - -ret_from_interrupt: - jeq 1f -2: - RESTORE_ALL -1: - moveb %sp@(PT_OFF_SR), %d0 - and #7, %d0 - jhi 2b - - /* check if we need to do software interrupts */ - jeq ret_from_exception - - pea ret_from_exception - jra do_softirq - + bra 
ret_from_exception /* * Handler for uninitialized and spurious interrupts. diff --git a/arch/m68k/platform/68360/entry.S b/arch/m68k/platform/68360/entry.S index 904fd9a4af4e4..447c33ef37fda 100644 --- a/arch/m68k/platform/68360/entry.S +++ b/arch/m68k/platform/68360/entry.S @@ -29,7 +29,6 @@ .globl ret_from_exception .globl ret_from_signal .globl sys_call_table -.globl ret_from_interrupt .globl bad_interrupt .globl inthandler @@ -132,26 +131,9 @@ inthandler: movel %sp,%sp@- movel %d0,%sp@- /* put vector # on stack*/ - jbsr do_IRQ /* process the IRQ*/ -3: addql #8,%sp /* pop parameters off stack*/ - bra ret_from_interrupt - -ret_from_interrupt: - jeq 1f -2: - RESTORE_ALL -1: - moveb %sp@(PT_OFF_SR), %d0 - and #7, %d0 - jhi 2b - /* check if we need to do software interrupts */ - - movel irq_stat+CPUSTAT_SOFTIRQ_PENDING,%d0 - jeq ret_from_exception - - pea ret_from_exception - jra do_softirq - + jbsr do_IRQ /* process the IRQ */ + addql #8,%sp /* pop parameters off stack*/ + jra ret_from_exception /* * Handler for uninitialized and spurious interrupts. From 54197e43a4a9a0f3fc406d72d9815754e84fab1e Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Sep 2013 18:53:05 +0000 Subject: [PATCH 3/7] hardirq: Make hardirq bits generic There is no reason for per arch hardirq bits. Make them all generic Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20130917183628.534494408@linutronix.de --- arch/blackfin/include/asm/hardirq.h | 3 --- arch/cris/include/asm/hardirq.h | 12 ------------ arch/m32r/include/asm/hardirq.h | 16 --------------- arch/m68k/include/asm/hardirq.h | 11 ----------- arch/s390/include/asm/hardirq.h | 2 -- arch/sparc/include/asm/hardirq_32.h | 1 - arch/sparc/include/asm/hardirq_64.h | 2 -- arch/tile/include/asm/hardirq.h | 2 -- include/linux/preempt_mask.h | 30 ++++++++--------------------- 9 files changed, 8 insertions(+), 71 deletions(-) diff --git a/arch/blackfin/include/asm/hardirq.h b/arch/blackfin/include/asm/hardirq.h index c078dd78d998b..58b54a6d5a162 100644 --- a/arch/blackfin/include/asm/hardirq.h +++ b/arch/blackfin/include/asm/hardirq.h @@ -12,9 +12,6 @@ extern void ack_bad_irq(unsigned int irq); #define ack_bad_irq ack_bad_irq -/* Define until common code gets sane defaults */ -#define HARDIRQ_BITS 9 - #include #endif diff --git a/arch/cris/include/asm/hardirq.h b/arch/cris/include/asm/hardirq.h index 17bb12d760b2e..04126f7bfab2a 100644 --- a/arch/cris/include/asm/hardirq.h +++ b/arch/cris/include/asm/hardirq.h @@ -2,18 +2,6 @@ #define __ASM_HARDIRQ_H #include - -#define HARDIRQ_BITS 8 - -/* - * The hardirq mask has to be large enough to have - * space for potentially all IRQ sources in the system - * nesting on a single CPU: - */ -#if (1 << HARDIRQ_BITS) < NR_IRQS -# error HARDIRQ_BITS is too low! -#endif - #include #endif /* __ASM_HARDIRQ_H */ diff --git a/arch/m32r/include/asm/hardirq.h b/arch/m32r/include/asm/hardirq.h index 4c31c0ae215e8..5f2ac4f64ddfd 100644 --- a/arch/m32r/include/asm/hardirq.h +++ b/arch/m32r/include/asm/hardirq.h @@ -3,22 +3,6 @@ #define __ASM_HARDIRQ_H #include - -#if NR_IRQS > 256 -#define HARDIRQ_BITS 9 -#else -#define HARDIRQ_BITS 8 -#endif - -/* - * The hardirq mask has to be large enough to have - * space for potentially all IRQ sources in the system - * nesting on a single CPU: - */ -#if (1 << HARDIRQ_BITS) < NR_IRQS -# error HARDIRQ_BITS is too low! 
-#endif - #include #endif /* __ASM_HARDIRQ_H */ diff --git a/arch/m68k/include/asm/hardirq.h b/arch/m68k/include/asm/hardirq.h index db30ed276878d..6c618529d9b96 100644 --- a/arch/m68k/include/asm/hardirq.h +++ b/arch/m68k/include/asm/hardirq.h @@ -5,17 +5,6 @@ #include #include -#define HARDIRQ_BITS 8 - -/* - * The hardirq mask has to be large enough to have - * space for potentially all IRQ sources in the system - * nesting on a single CPU: - */ -#if (1 << HARDIRQ_BITS) < NR_IRQS -# error HARDIRQ_BITS is too low! -#endif - #ifdef CONFIG_MMU static inline void ack_bad_irq(unsigned int irq) diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h index a908d2941c5d9..b7eabaaeffbd0 100644 --- a/arch/s390/include/asm/hardirq.h +++ b/arch/s390/include/asm/hardirq.h @@ -18,8 +18,6 @@ #define __ARCH_HAS_DO_SOFTIRQ #define __ARCH_IRQ_EXIT_IRQS_DISABLED -#define HARDIRQ_BITS 8 - static inline void ack_bad_irq(unsigned int irq) { printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq); diff --git a/arch/sparc/include/asm/hardirq_32.h b/arch/sparc/include/asm/hardirq_32.h index 162007643cdcb..ee93923b7f825 100644 --- a/arch/sparc/include/asm/hardirq_32.h +++ b/arch/sparc/include/asm/hardirq_32.h @@ -7,7 +7,6 @@ #ifndef __SPARC_HARDIRQ_H #define __SPARC_HARDIRQ_H -#define HARDIRQ_BITS 8 #include #endif /* __SPARC_HARDIRQ_H */ diff --git a/arch/sparc/include/asm/hardirq_64.h b/arch/sparc/include/asm/hardirq_64.h index 7c29fd1a87aa4..f478ff1ddd028 100644 --- a/arch/sparc/include/asm/hardirq_64.h +++ b/arch/sparc/include/asm/hardirq_64.h @@ -14,6 +14,4 @@ void ack_bad_irq(unsigned int irq); -#define HARDIRQ_BITS 8 - #endif /* !(__SPARC64_HARDIRQ_H) */ diff --git a/arch/tile/include/asm/hardirq.h b/arch/tile/include/asm/hardirq.h index 822390f9a154a..54110af23985a 100644 --- a/arch/tile/include/asm/hardirq.h +++ b/arch/tile/include/asm/hardirq.h @@ -42,6 +42,4 @@ DECLARE_PER_CPU(irq_cpustat_t, irq_stat); #include /* Standard mappings for irq_cpustat_t above */ -#define HARDIRQ_BITS 8 - #endif /* _ASM_TILE_HARDIRQ_H */ diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h index 931bc616219f6..810d7e386f202 100644 --- a/include/linux/preempt_mask.h +++ b/include/linux/preempt_mask.h @@ -11,36 +11,22 @@ * - bits 0-7 are the preemption count (max preemption depth: 256) * - bits 8-15 are the softirq count (max # of softirqs: 256) * - * The hardirq count can in theory reach the same as NR_IRQS. - * In reality, the number of nested IRQS is limited to the stack - * size as well. For archs with over 1000 IRQS it is not practical - * to expect that they will all nest. We give a max of 10 bits for - * hardirq nesting. An arch may choose to give less than 10 bits. - * m68k expects it to be 8. - * - * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024) - * - bit 26 is the NMI_MASK - * - bit 27 is the PREEMPT_ACTIVE flag + * The hardirq count could in theory be the same as the number of + * interrupts in the system, but we run all interrupt handlers with + * interrupts disabled, so we cannot have nesting interrupts. Though + * there are a few palaeontologic drivers which reenable interrupts in + * the handler, so we need more than one bit here. 
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x03ff0000
- * NMI_MASK: 0x04000000
+ * HARDIRQ_MASK: 0x000f0000
+ * NMI_MASK: 0x00100000
 */
 #define PREEMPT_BITS 8
 #define SOFTIRQ_BITS 8
+#define HARDIRQ_BITS 4
 #define NMI_BITS 1

-#define MAX_HARDIRQ_BITS 10
-
-#ifndef HARDIRQ_BITS
-# define HARDIRQ_BITS MAX_HARDIRQ_BITS
-#endif
-
-#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
-#error HARDIRQ_BITS too high!
-#endif
-
 #define PREEMPT_SHIFT 0
 #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
 #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)

From 650e4dc2a75959fdca1eecc0147bbb21dbfadf0f Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 17 Sep 2013 18:53:07 +0000
Subject: [PATCH 4/7] m32r: Use preempt_schedule_irq

Use the proper core function instead of fiddling with PREEMPT_ACTIVE
and interrupt enabling in the low level code.

Signed-off-by: Thomas Gleixner
Cc: Hirokazu Takata
Cc: linux-m32r-ja@ml.linux-m32r.org
Link: http://lkml.kernel.org/r/20130917183628.758421136@linutronix.de
---
 arch/m32r/kernel/entry.S | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 0c01543f10cd9..7c3db9940ce1f 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -182,13 +182,7 @@ need_resched:
 ld r4, PSW(sp) ; interrupts off (exception path) ?
 and3 r4, r4, #0x4000
 beqz r4, restore_all
- LDIMM (r4, PREEMPT_ACTIVE)
- st r4, @(TI_PRE_COUNT, r8)
- ENABLE_INTERRUPTS(r4)
- bl schedule
- ldi r4, #0
- st r4, @(TI_PRE_COUNT, r8)
- DISABLE_INTERRUPTS(r4)
+ bl preempt_schedule_irq
 bra need_resched
 #endif

From aa0d53260596c1bef23547537724d4bad78e6f52 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 17 Sep 2013 18:53:08 +0000
Subject: [PATCH 5/7] ia64: Use preempt_schedule_irq

Use the proper core function instead of fiddling with PREEMPT_ACTIVE
and enabling/disabling interrupts in the low level code.

Signed-off-by: Thomas Gleixner
Cc: Tony Luck
Cc: Fenghua Yu
Cc: linux-ia64@vger.kernel.org
Link: http://lkml.kernel.org/r/20130917183628.857145384@linutronix.de
---
 arch/ia64/kernel/entry.S | 15 +--------------
 1 file changed, 1 insertion(+), 14 deletions(-)

diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 7a53530f22c21..ddea607f948aa 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1169,21 +1169,8 @@ skip_rbs_switch:
 .work_pending:
 tbit.z p6,p0=r31,TIF_NEED_RESCHED // is resched not needed?
 (p6) br.cond.sptk.few .notify
-#ifdef CONFIG_PREEMPT
-(pKStk) dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
- ;;
-(pKStk) st4 [r20]=r21
-#endif
- SSM_PSR_I(p0, p6, r2) // enable interrupts
- br.call.spnt.many rp=schedule
+ br.call.spnt.many rp=preempt_schedule_irq
 .ret9: cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check)
- RSM_PSR_I(p0, r2, r20) // disable interrupts
- ;;
-#ifdef CONFIG_PREEMPT
-(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
- ;;
-(pKStk) st4 [r20]=r0 // preempt_count() <- 0
-#endif
 (pLvSys)br.cond.sptk.few __paravirt_pending_syscall_end
 br.cond.sptk.many .work_processed_kernel

From 9385d949d5bd0eb642ed05ea263c3638c9f4e372 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 17 Sep 2013 18:53:08 +0000
Subject: [PATCH 6/7] sparc: Use preempt_schedule_irq

The low level preemption code fiddles with the PREEMPT_ACTIVE bit for
no reason and calls schedule() with interrupts disabled, which is wrong
to begin with. Remove the PREEMPT_ACTIVE fiddling and call the proper
preempt_schedule_irq() function.

Signed-off-by: Thomas Gleixner
Acked-by: David S.
Miller Cc: sparclinux@vger.kernel.org Link: http://lkml.kernel.org/r/20130917183628.966769884@linutronix.de --- arch/sparc/kernel/rtrap_64.S | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index afa2a9e3d0a0c..76213db97a4a2 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -306,12 +306,10 @@ to_kernel: nop cmp %l4, 0 bne,pn %xcc, kern_fpucheck - sethi %hi(PREEMPT_ACTIVE), %l6 - stw %l6, [%g6 + TI_PRE_COUNT] - call schedule + nop + call preempt_schedule_irq nop ba,pt %xcc, rtrap - stw %g0, [%g6 + TI_PRE_COUNT] #endif kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5 brz,pt %l5, rt_continue From 00d1a39e69d5afa7523dad515a05b21abd17c389 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 17 Sep 2013 18:53:09 +0000 Subject: [PATCH 7/7] preempt: Make PREEMPT_ACTIVE generic No point in having this bit defined by architecture. Signed-off-by: Thomas Gleixner Acked-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20130917183629.090698799@linutronix.de --- arch/alpha/include/asm/thread_info.h | 2 -- arch/arc/include/asm/thread_info.h | 2 -- arch/arm/include/asm/thread_info.h | 6 ------ arch/arm64/include/asm/thread_info.h | 6 ------ arch/avr32/include/asm/thread_info.h | 2 -- arch/blackfin/include/asm/thread_info.h | 2 -- arch/c6x/include/asm/thread_info.h | 2 -- arch/cris/include/asm/thread_info.h | 2 -- arch/frv/include/asm/thread_info.h | 2 -- arch/hexagon/include/asm/thread_info.h | 4 ---- arch/ia64/include/asm/thread_info.h | 3 --- arch/m32r/include/asm/thread_info.h | 2 -- arch/m68k/include/asm/thread_info.h | 2 -- arch/metag/include/asm/thread_info.h | 2 -- arch/microblaze/include/asm/thread_info.h | 2 -- arch/mips/include/asm/thread_info.h | 2 -- arch/mn10300/include/asm/thread_info.h | 2 -- arch/parisc/include/asm/thread_info.h | 3 --- arch/powerpc/include/asm/thread_info.h | 2 -- arch/s390/include/asm/thread_info.h | 2 -- arch/score/include/asm/thread_info.h | 2 -- arch/sh/include/asm/thread_info.h | 2 -- arch/sh/kernel/entry-common.S | 6 ++---- arch/sparc/include/asm/thread_info_32.h | 2 -- arch/sparc/include/asm/thread_info_64.h | 2 -- arch/tile/include/asm/thread_info.h | 2 -- arch/um/include/asm/thread_info.h | 2 -- arch/unicore32/include/asm/thread_info.h | 6 ------ arch/x86/include/asm/thread_info.h | 2 -- arch/xtensa/include/asm/thread_info.h | 2 -- include/linux/preempt_mask.h | 15 +++++---------- include/linux/sched.h | 2 +- 32 files changed, 8 insertions(+), 89 deletions(-) diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index 52cd2a4a3ff48..453597b91f3a6 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h @@ -58,8 +58,6 @@ register struct thread_info *__current_thread_info __asm__("$8"); #define THREAD_SIZE_ORDER 1 #define THREAD_SIZE (2*PAGE_SIZE) -#define PREEMPT_ACTIVE 0x40000000 - /* * Thread information flags: * - these are process state flags and used from assembly diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h index 2d50a4cdd7f3d..45be216720118 100644 --- a/arch/arc/include/asm/thread_info.h +++ b/arch/arc/include/asm/thread_info.h @@ -80,8 +80,6 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void) #endif /* !__ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x10000000 - /* * thread information flags * - these are process state flags that various assembly files may need to diff --git a/arch/arm/include/asm/thread_info.h 
b/arch/arm/include/asm/thread_info.h index df5e13d64f2c0..71a06b293489d 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -140,12 +140,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, struct user_vfp_exc __user *); #endif -/* - * We use bit 30 of the preempt_count to indicate that kernel - * preemption is occurring. See . - */ -#define PREEMPT_ACTIVE 0x40000000 - /* * thread information flags: * TIF_SYSCALL_TRACE - syscall trace active diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 23a3c4791d86c..720e70b66ffdc 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -88,12 +88,6 @@ static inline struct thread_info *current_thread_info(void) #endif -/* - * We use bit 30 of the preempt_count to indicate that kernel - * preemption is occurring. See . - */ -#define PREEMPT_ACTIVE 0x40000000 - /* * thread information flags: * TIF_SYSCALL_TRACE - syscall trace active diff --git a/arch/avr32/include/asm/thread_info.h b/arch/avr32/include/asm/thread_info.h index 6dc62e1f94c78..a978f3fe7c25a 100644 --- a/arch/avr32/include/asm/thread_info.h +++ b/arch/avr32/include/asm/thread_info.h @@ -66,8 +66,6 @@ static inline struct thread_info *current_thread_info(void) #endif /* !__ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x40000000 - /* * Thread information flags * - these are process state flags that various assembly files may need to access diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h index 3894005337ba7..55f473bdad361 100644 --- a/arch/blackfin/include/asm/thread_info.h +++ b/arch/blackfin/include/asm/thread_info.h @@ -88,8 +88,6 @@ static inline struct thread_info *current_thread_info(void) #define TI_CPU 12 #define TI_PREEMPT 16 -#define PREEMPT_ACTIVE 0x4000000 - /* * thread information flag bit numbers */ diff --git a/arch/c6x/include/asm/thread_info.h b/arch/c6x/include/asm/thread_info.h index 4c8dc562bd90d..d4e9ef87076db 100644 --- a/arch/c6x/include/asm/thread_info.h +++ b/arch/c6x/include/asm/thread_info.h @@ -84,8 +84,6 @@ struct thread_info *current_thread_info(void) #define put_thread_info(ti) put_task_struct((ti)->task) #endif /* __ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x10000000 - /* * thread information flag bit numbers * - pending work-to-be-done flags are in LSW diff --git a/arch/cris/include/asm/thread_info.h b/arch/cris/include/asm/thread_info.h index 07c8c40c52b31..55dede18c032e 100644 --- a/arch/cris/include/asm/thread_info.h +++ b/arch/cris/include/asm/thread_info.h @@ -44,8 +44,6 @@ struct thread_info { #endif -#define PREEMPT_ACTIVE 0x10000000 - /* * macros/functions for gaining access to the thread information structure */ diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h index bebd7eadc7720..af29e17c01814 100644 --- a/arch/frv/include/asm/thread_info.h +++ b/arch/frv/include/asm/thread_info.h @@ -52,8 +52,6 @@ struct thread_info { #endif -#define PREEMPT_ACTIVE 0x10000000 - /* * macros/functions for gaining access to the thread information structure */ diff --git a/arch/hexagon/include/asm/thread_info.h b/arch/hexagon/include/asm/thread_info.h index f7c32406a711a..a59dad3b36951 100644 --- a/arch/hexagon/include/asm/thread_info.h +++ b/arch/hexagon/include/asm/thread_info.h @@ -73,10 +73,6 @@ struct thread_info { #endif /* __ASSEMBLY__ */ -/* looks like "linux/hardirq.h" uses this. 
*/ - -#define PREEMPT_ACTIVE 0x10000000 - #ifndef __ASSEMBLY__ #define INIT_THREAD_INFO(tsk) \ diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index cade13dd0299f..5957cf61f8980 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h @@ -11,9 +11,6 @@ #include #include -#define PREEMPT_ACTIVE_BIT 30 -#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) - #ifndef __ASSEMBLY__ /* diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h index c074f4c2e858e..00171703402f1 100644 --- a/arch/m32r/include/asm/thread_info.h +++ b/arch/m32r/include/asm/thread_info.h @@ -53,8 +53,6 @@ struct thread_info { #endif -#define PREEMPT_ACTIVE 0x10000000 - #define THREAD_SIZE (PAGE_SIZE << 1) #define THREAD_SIZE_ORDER 1 /* diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h index 126131f94a2ca..21a4784ca5a18 100644 --- a/arch/m68k/include/asm/thread_info.h +++ b/arch/m68k/include/asm/thread_info.h @@ -35,8 +35,6 @@ struct thread_info { }; #endif /* __ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x4000000 - #define INIT_THREAD_INFO(tsk) \ { \ .task = &tsk, \ diff --git a/arch/metag/include/asm/thread_info.h b/arch/metag/include/asm/thread_info.h index 7c4a330061423..b19e9c588a167 100644 --- a/arch/metag/include/asm/thread_info.h +++ b/arch/metag/include/asm/thread_info.h @@ -46,8 +46,6 @@ struct thread_info { #endif -#define PREEMPT_ACTIVE 0x10000000 - #ifdef CONFIG_4KSTACKS #define THREAD_SHIFT 12 #else diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h index de26ea6373ded..8c9d36591a031 100644 --- a/arch/microblaze/include/asm/thread_info.h +++ b/arch/microblaze/include/asm/thread_info.h @@ -106,8 +106,6 @@ static inline struct thread_info *current_thread_info(void) /* thread information allocation */ #endif /* __ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x10000000 - /* * thread information flags * - these are process state flags that various assembly files may diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index f9b24bfbdbae9..4f58ef6d0eed5 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -92,8 +92,6 @@ static inline struct thread_info *current_thread_info(void) #define STACK_WARN (THREAD_SIZE / 8) -#define PREEMPT_ACTIVE 0x10000000 - /* * thread information flags * - these are process state flags that various assembly files may need to diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h index 224b4262486db..bf280eaccd36f 100644 --- a/arch/mn10300/include/asm/thread_info.h +++ b/arch/mn10300/include/asm/thread_info.h @@ -16,8 +16,6 @@ #include -#define PREEMPT_ACTIVE 0x10000000 - #ifdef CONFIG_4KSTACKS #define THREAD_SIZE (4096) #define THREAD_SIZE_ORDER (0) diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index bc7cf120106b3..d5f97ea3a4e1a 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -46,9 +46,6 @@ struct thread_info { #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) -#define PREEMPT_ACTIVE_BIT 28 -#define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) - /* * thread information flags */ diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index ba7b1973866e1..8fd6cf6dcee85 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ 
b/arch/powerpc/include/asm/thread_info.h @@ -82,8 +82,6 @@ static inline struct thread_info *current_thread_info(void) #endif /* __ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x10000000 - /* * thread information flag bit numbers */ diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index eb5f64d26d061..10e0fcd3633d1 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -111,6 +111,4 @@ static inline struct thread_info *current_thread_info(void) #define is_32bit_task() (1) #endif -#define PREEMPT_ACTIVE 0x4000000 - #endif /* _ASM_THREAD_INFO_H */ diff --git a/arch/score/include/asm/thread_info.h b/arch/score/include/asm/thread_info.h index 1425cc034872e..656b7ada9326a 100644 --- a/arch/score/include/asm/thread_info.h +++ b/arch/score/include/asm/thread_info.h @@ -72,8 +72,6 @@ register struct thread_info *__current_thread_info __asm__("r28"); #endif /* !__ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x10000000 - /* * thread information flags * - these are process state flags that various assembly files may need to diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index 45a93669289da..ad27ffa65e2ea 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h @@ -41,8 +41,6 @@ struct thread_info { #endif -#define PREEMPT_ACTIVE 0x10000000 - #if defined(CONFIG_4KSTACKS) #define THREAD_SHIFT 12 #else diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index 9b6e4beeb2962..ca46834294b7f 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -108,7 +108,7 @@ need_resched: and #(0xf0>>1), r0 ! interrupts off (exception path)? cmp/eq #(0xf0>>1), r0 bt noresched - mov.l 3f, r0 + mov.l 1f, r0 jsr @r0 ! call preempt_schedule_irq nop bra need_resched @@ -119,9 +119,7 @@ noresched: nop .align 2 -1: .long PREEMPT_ACTIVE -2: .long schedule -3: .long preempt_schedule_irq +1: .long preempt_schedule_irq #endif ENTRY(resume_userspace) diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h index dd3807599bb9a..96efa7adc2233 100644 --- a/arch/sparc/include/asm/thread_info_32.h +++ b/arch/sparc/include/asm/thread_info_32.h @@ -105,8 +105,6 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define TI_W_SAVED 0x250 /* #define TI_RESTART_BLOCK 0x25n */ /* Nobody cares */ -#define PREEMPT_ACTIVE 0x4000000 - /* * thread information flag bit numbers */ diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h index d5e5042510790..2b4e17b79e9ce 100644 --- a/arch/sparc/include/asm/thread_info_64.h +++ b/arch/sparc/include/asm/thread_info_64.h @@ -111,8 +111,6 @@ struct thread_info { #define THREAD_SHIFT PAGE_SHIFT #endif /* PAGE_SHIFT == 13 */ -#define PREEMPT_ACTIVE 0x10000000 - /* * macros/functions for gaining access to the thread information structure */ diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index b8aa6df3e102d..729aa107f64e6 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h @@ -113,8 +113,6 @@ extern void _cpu_idle(void); #endif /* !__ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x10000000 - /* * Thread information flags that various assembly files may need to access. 
* Keep flags accessed frequently in low bits, particular since it makes diff --git a/arch/um/include/asm/thread_info.h b/arch/um/include/asm/thread_info.h index 2c8eeb2df8b43..1c5b2a83046aa 100644 --- a/arch/um/include/asm/thread_info.h +++ b/arch/um/include/asm/thread_info.h @@ -60,8 +60,6 @@ static inline struct thread_info *current_thread_info(void) #endif -#define PREEMPT_ACTIVE 0x10000000 - #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ diff --git a/arch/unicore32/include/asm/thread_info.h b/arch/unicore32/include/asm/thread_info.h index 818b4a1edb5b1..af36d8eabdf1c 100644 --- a/arch/unicore32/include/asm/thread_info.h +++ b/arch/unicore32/include/asm/thread_info.h @@ -117,12 +117,6 @@ static inline struct thread_info *current_thread_info(void) #endif -/* - * We use bit 30 of the preempt_count to indicate that kernel - * preemption is occurring. See . - */ -#define PREEMPT_ACTIVE 0x40000000 - /* * thread information flags: * TIF_SYSCALL_TRACE - syscall trace active diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index c46a46be1ec69..3ba3de457d053 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -153,8 +153,6 @@ struct thread_info { #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) -#define PREEMPT_ACTIVE 0x10000000 - #ifdef CONFIG_X86_32 #define STACK_WARN (THREAD_SIZE/8) diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h index 9481004ac119e..470153e8547c1 100644 --- a/arch/xtensa/include/asm/thread_info.h +++ b/arch/xtensa/include/asm/thread_info.h @@ -76,8 +76,6 @@ struct thread_info { #endif -#define PREEMPT_ACTIVE 0x10000000 - /* * macros/functions for gaining access to the thread information structure */ diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h index 810d7e386f202..d169820203dd8 100644 --- a/include/linux/preempt_mask.h +++ b/include/linux/preempt_mask.h @@ -17,10 +17,11 @@ * there are a few palaeontologic drivers which reenable interrupts in * the handler, so we need more than one bit here. * - * PREEMPT_MASK: 0x000000ff - * SOFTIRQ_MASK: 0x0000ff00 - * HARDIRQ_MASK: 0x000f0000 - * NMI_MASK: 0x00100000 + * PREEMPT_MASK: 0x000000ff + * SOFTIRQ_MASK: 0x0000ff00 + * HARDIRQ_MASK: 0x000f0000 + * NMI_MASK: 0x00100000 + * PREEMPT_ACTIVE: 0x00200000 */ #define PREEMPT_BITS 8 #define SOFTIRQ_BITS 8 @@ -46,15 +47,9 @@ #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -#ifndef PREEMPT_ACTIVE #define PREEMPT_ACTIVE_BITS 1 #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) -#endif - -#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS)) -#error PREEMPT_ACTIVE is too low! -#endif #define hardirq_count() (preempt_count() & HARDIRQ_MASK) #define softirq_count() (preempt_count() & SOFTIRQ_MASK) diff --git a/include/linux/sched.h b/include/linux/sched.h index 045b0d2278463..55080df48b704 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -22,7 +22,7 @@ struct sched_param { #include #include #include -#include +#include #include #include
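The end result of the series is a fixed, generic preempt_count layout. As a
quick sanity check, the masks can be recomputed from the bit widths exactly as
include/linux/preempt_mask.h now defines them; this is a standalone userspace
sketch (an illustration, not part of the patches), mirroring those macros:

```c
/* Recompute the generic preempt_count layout after patches 3 and 7. */
#include <stdio.h>

#define __IRQ_MASK(x)	((1UL << (x)) - 1)

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define PREEMPT_ACTIVE_BITS	1
#define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
#define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)

int main(void)
{
	/* Expected: 0x000000ff 0x0000ff00 0x000f0000 0x00100000 0x00200000 */
	printf("PREEMPT_MASK:   0x%08lx\n", PREEMPT_MASK);
	printf("SOFTIRQ_MASK:   0x%08lx\n", SOFTIRQ_MASK);
	printf("HARDIRQ_MASK:   0x%08lx\n", HARDIRQ_MASK);
	printf("NMI_MASK:       0x%08lx\n", NMI_MASK);
	printf("PREEMPT_ACTIVE: 0x%08lx\n", PREEMPT_ACTIVE);
	return 0;
}
```

Four hardirq bits still allow 15 levels of hardirq nesting, which is plenty
once handlers run with interrupts disabled, and PREEMPT_ACTIVE ends up at bit
21 (0x00200000), directly above the NMI bit.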