sched/membarrier: Return -ENOMEM to userspace on memory allocation failure

Remove the IPI fallback code that membarrier used to handle very
infrequent cpumask memory allocation failures. Use GFP_KERNEL rather
than GFP_NOWAIT, and relax the blocking guarantees for the expedited
membarrier system call commands, allowing them to block while waiting
for memory to be made available.

In addition, -ENOMEM can now be returned to user-space if the cpumask
memory allocation fails.
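
A minimal user-space sketch of the visible change, assuming a kernel
containing this commit; glibc provides no membarrier() wrapper, so the
call goes through syscall(2), and the ENOMEM handling shown is
illustrative:

	#include <errno.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/membarrier.h>

	static int membarrier(int cmd, unsigned int flags)
	{
		return syscall(__NR_membarrier, cmd, flags);
	}

	int main(void)
	{
		if (membarrier(MEMBARRIER_CMD_GLOBAL_EXPEDITED, 0) != 0) {
			if (errno == ENOMEM)	/* new with this commit */
				fprintf(stderr, "membarrier: cpumask allocation failed\n");
			else
				perror("membarrier");
			return 1;
		}
		return 0;
	}

An ENOMEM here reflects transient memory pressure; a caller can simply
retry later.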

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux admin <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190919173705.2181-8-mathieu.desnoyers@efficios.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Mathieu Desnoyers authored and Ingo Molnar committed Sep 25, 2019
1 parent c6d68c1 commit c172e0a
Showing 1 changed file with 20 additions and 43 deletions.
kernel/sched/membarrier.c: 20 additions & 43 deletions
@@ -66,7 +66,6 @@ void membarrier_exec_mmap(struct mm_struct *mm)
 static int membarrier_global_expedited(void)
 {
 	int cpu;
-	bool fallback = false;
 	cpumask_var_t tmpmask;
 
 	if (num_online_cpus() == 1)
@@ -78,15 +77,8 @@ static int membarrier_global_expedited(void)
 	 */
 	smp_mb();	/* system call entry is not a mb. */
 
-	/*
-	 * Expedited membarrier commands guarantee that they won't
-	 * block, hence the GFP_NOWAIT allocation flag and fallback
-	 * implementation.
-	 */
-	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
-		/* Fallback for OOM. */
-		fallback = true;
-	}
+	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+		return -ENOMEM;
 
 	cpus_read_lock();
 	rcu_read_lock();
@@ -117,18 +109,15 @@ static int membarrier_global_expedited(void)
 		if (p->flags & PF_KTHREAD)
 			continue;
 
-		if (!fallback)
-			__cpumask_set_cpu(cpu, tmpmask);
-		else
-			smp_call_function_single(cpu, ipi_mb, NULL, 1);
+		__cpumask_set_cpu(cpu, tmpmask);
 	}
 	rcu_read_unlock();
-	if (!fallback) {
-		preempt_disable();
-		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
-		preempt_enable();
-		free_cpumask_var(tmpmask);
-	}
+
+	preempt_disable();
+	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+	preempt_enable();
+
+	free_cpumask_var(tmpmask);
 	cpus_read_unlock();
 
 	/*
@@ -143,7 +132,6 @@ static int membarrier_global_expedited(void)
 static int membarrier_private_expedited(int flags)
 {
 	int cpu;
-	bool fallback = false;
 	cpumask_var_t tmpmask;
 	struct mm_struct *mm = current->mm;
 
@@ -168,15 +156,8 @@ static int membarrier_private_expedited(int flags)
 	 */
 	smp_mb();	/* system call entry is not a mb. */
 
-	/*
-	 * Expedited membarrier commands guarantee that they won't
-	 * block, hence the GFP_NOWAIT allocation flag and fallback
-	 * implementation.
-	 */
-	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
-		/* Fallback for OOM. */
-		fallback = true;
-	}
+	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
+		return -ENOMEM;
 
 	cpus_read_lock();
 	rcu_read_lock();
@@ -195,20 +176,16 @@ static int membarrier_private_expedited(int flags)
 			continue;
 		rcu_read_lock();
 		p = rcu_dereference(cpu_rq(cpu)->curr);
-		if (p && p->mm == mm) {
-			if (!fallback)
-				__cpumask_set_cpu(cpu, tmpmask);
-			else
-				smp_call_function_single(cpu, ipi_mb, NULL, 1);
-		}
+		if (p && p->mm == mm)
+			__cpumask_set_cpu(cpu, tmpmask);
 	}
 	rcu_read_unlock();
-	if (!fallback) {
-		preempt_disable();
-		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
-		preempt_enable();
-		free_cpumask_var(tmpmask);
-	}
+
+	preempt_disable();
+	smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
+	preempt_enable();
+
+	free_cpumask_var(tmpmask);
 	cpus_read_unlock();
 
 	/*
@@ -264,7 +241,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm)
 		struct rq *rq = cpu_rq(cpu);
 		struct task_struct *p;
 
-		p = rcu_dereference(&rq->curr);
+		p = rcu_dereference(rq->curr);
 		if (p && p->mm == mm)
 			__cpumask_set_cpu(cpu, tmpmask);
 	}
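
Condensed for reference, the allocate/batch/broadcast pattern both
expedited commands now share; a sketch rather than kernel source, with
the hypothetical should_ipi() standing in for each command's per-CPU
predicate (skip kernel threads, match p->mm == mm, and so on):

	/*
	 * Sketch of the post-patch flow, assuming kernel context;
	 * should_ipi() is a hypothetical stand-in for the
	 * command-specific check on the remote CPU's current task.
	 */
	static int membarrier_expedited_sketch(void)
	{
		cpumask_var_t tmpmask;
		int cpu;

		/* GFP_KERNEL may block; on failure the syscall returns -ENOMEM. */
		if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
			return -ENOMEM;

		cpus_read_lock();
		rcu_read_lock();
		for_each_online_cpu(cpu) {
			struct task_struct *p = rcu_dereference(cpu_rq(cpu)->curr);

			if (p && should_ipi(p))
				__cpumask_set_cpu(cpu, tmpmask);	/* batch; no IPI yet */
		}
		rcu_read_unlock();

		/* One broadcast replaces the per-CPU smp_call_function_single() fallback. */
		preempt_disable();
		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
		preempt_enable();

		free_cpumask_var(tmpmask);
		cpus_read_unlock();
		return 0;
	}

Batching the target CPUs into tmpmask and issuing a single
smp_call_function_many() keeps the IPI cost to one broadcast, which is
what made the per-CPU fallback removable once the allocation was
allowed to block.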
