kernel.h: Add non_block_start/end()
In some special cases we must not block, but there's no spinlock,
preempt-off, irqs-off or similar critical section already in place that
arms the might_sleep() debug checks. Add a non_block_start/end() pair to
annotate these.

This will be used in the oom paths of mmu-notifiers, where blocking is not
allowed in order to guarantee forward progress. Quoting Michal:

"The notifier is called from quite a restricted context - oom_reaper -
which shouldn't depend on any locks or sleepable conditionals. The code
should be swift as well but we mostly do care about it to make a forward
progress. Checking for sleepable context is the best thing we could come
up with that would describe these demands at least partially."

Peter also asked whether we want to catch spinlocks on top, but Michal
said those are less of a problem because spinlocks can't have an indirect
dependency upon the page allocator and hence close the loop with the oom
reaper.

Suggested by Michal Hocko.

Link: https://lore.kernel.org/r/20190826201425.17547-4-daniel.vetter@ffwll.ch
Acked-by: Christian König <christian.koenig@amd.com> (v1)
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Daniel Vetter authored and Jason Gunthorpe committed Sep 7, 2019
1 parent f2bc09e commit 312364f
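
For readers unfamiliar with the annotation, a minimal usage sketch (the helper shown is hypothetical and not part of this commit; the real user is the mmu-notifier oom path wired up elsewhere in the series):

	non_block_start();
	/*
	 * Everything in here must avoid sleeping primitives.  With
	 * CONFIG_DEBUG_ATOMIC_SLEEP enabled, any might_sleep() reached
	 * in between splats; with it disabled, both macros are no-ops.
	 */
	reap_one_range(mm, start, end);	/* hypothetical non-sleeping helper */
	non_block_end();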
Showing 3 changed files with 40 additions and 6 deletions.
23 changes: 22 additions & 1 deletion include/linux/kernel.h
@@ -217,7 +217,9 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
  * might_sleep - annotation for functions that can sleep
  *
  * this macro will print a stack trace if it is executed in an atomic
- * context (spinlock, irq-handler, ...).
+ * context (spinlock, irq-handler, ...). Additional sections where blocking is
+ * not allowed can be annotated with non_block_start() and non_block_end()
+ * pairs.
  *
  * This is a useful debugging help to be able to catch problems early and not
  * be bitten later when the calling function happens to sleep when it is not
@@ -233,6 +235,23 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
 # define cant_sleep() \
 	do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
 # define sched_annotate_sleep()	(current->task_state_change = 0)
+/**
+ * non_block_start - annotate the start of section where sleeping is prohibited
+ *
+ * This is on behalf of the oom reaper, specifically when it is calling the mmu
+ * notifiers. The problem is that if the notifier were to block on, for example,
+ * mutex_lock() and if the process which holds that mutex were to perform a
+ * sleeping memory allocation, the oom reaper is now blocked on completion of
+ * that memory allocation. Other blocking calls like wait_event() pose similar
+ * issues.
+ */
+# define non_block_start() (current->non_block_count++)
+/**
+ * non_block_end - annotate the end of section where sleeping is prohibited
+ *
+ * Closes a section opened by non_block_start().
+ */
+# define non_block_end() WARN_ON(current->non_block_count-- == 0)
 #else
   static inline void ___might_sleep(const char *file, int line,
 				   int preempt_offset) { }
@@ -241,6 +260,8 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
 # define might_sleep() do { might_resched(); } while (0)
 # define cant_sleep() do { } while (0)
 # define sched_annotate_sleep() do { } while (0)
+# define non_block_start() do { } while (0)
+# define non_block_end() do { } while (0)
 #endif
 
 #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
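Since the debug-build definitions are just a per-task counter, the annotation nests naturally, and an unbalanced non_block_end() is caught by the WARN_ON. A sketch, assuming CONFIG_DEBUG_ATOMIC_SLEEP=y:

	non_block_start();	/* current->non_block_count == 1 */
	non_block_start();	/* == 2, nested sections are fine */
	non_block_end();	/* == 1, still non-blocking */
	non_block_end();	/* == 0, blocking allowed again */
	non_block_end();	/* unbalanced: WARN_ON fires, count was already 0 */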
4 changes: 4 additions & 0 deletions include/linux/sched.h
@@ -974,6 +974,10 @@ struct task_struct {
 	struct mutex_waiter		*blocked_on;
 #endif
 
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+	int				non_block_count;
+#endif
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 	unsigned int			irq_events;
 	unsigned long			hardirq_enable_ip;
19 changes: 14 additions & 5 deletions kernel/sched/core.c
@@ -3700,13 +3700,22 @@ static noinline void __schedule_bug(struct task_struct *prev)
 /*
  * Various schedule()-time debugging checks and statistics:
  */
-static inline void schedule_debug(struct task_struct *prev)
+static inline void schedule_debug(struct task_struct *prev, bool preempt)
 {
 #ifdef CONFIG_SCHED_STACK_END_CHECK
 	if (task_stack_end_corrupted(prev))
 		panic("corrupted stack end detected inside scheduler\n");
 #endif
 
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+	if (!preempt && prev->state && prev->non_block_count) {
+		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
+			prev->comm, prev->pid, prev->non_block_count);
+		dump_stack();
+		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+	}
+#endif
+
 	if (unlikely(in_atomic_preempt_off())) {
 		__schedule_bug(prev);
 		preempt_count_set(PREEMPT_DISABLED);
@@ -3813,7 +3822,7 @@ static void __sched notrace __schedule(bool preempt)
 	rq = cpu_rq(cpu);
 	prev = rq->curr;
 
-	schedule_debug(prev);
+	schedule_debug(prev, preempt);
 
 	if (sched_feat(HRTICK))
 		hrtick_clear(rq);
@@ -6570,7 +6579,7 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
 	rcu_sleep_check();
 
 	if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
-	     !is_idle_task(current)) ||
+	     !is_idle_task(current) && !current->non_block_count) ||
 	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
 	    oops_in_progress)
 		return;
@@ -6586,8 +6595,8 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
 		"BUG: sleeping function called from invalid context at %s:%d\n",
 		file, line);
 	printk(KERN_ERR
-		"in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
-		in_atomic(), irqs_disabled(),
+		"in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
+		in_atomic(), irqs_disabled(), current->non_block_count,
 		current->pid, current->comm);
 
 	if (task_stack_end_corrupted(current))
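The net effect is two detection paths: ___might_sleep() now complains about any might_sleep() annotation reached inside a non-block section even if nothing actually sleeps, while schedule_debug() catches a task that really does go to sleep there (prev->state set on a non-preemption pass through __schedule(); involuntary preemption is deliberately not flagged). A sketch of code that would trip both checks, with m a hypothetical mutex:

	non_block_start();
	might_sleep();		/* ___might_sleep() splats: non_block_count != 0 */
	mutex_lock(&m);		/* if this blocks, schedule_debug() prints
				 * "BUG: scheduling in a non-blocking section" */
	non_block_end();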
