sched: Massage set_cpus_allowed()
Thread a u32 flags word through the *set_cpus_allowed*() callchain.
This will allow adding behavioural tweaks for future users.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Link: https://lkml.kernel.org/r/20201023102346.729082820@infradead.org
Peter Zijlstra committed Nov 10, 2020
1 parent 120455c commit 9cfc3e1
Showing 3 changed files with 26 additions and 14 deletions.
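In outline, the change swaps a single-purpose bool check parameter for a u32 flags word, with SCA_CHECK reproducing the old boolean behaviour; new behavioural bits can then ride the same call chain without another signature change. Before the diff, a minimal standalone C sketch of that pattern (illustrative names only, not the kernel code):

/*
 * Hedged sketch, not kernel code: a bool parameter widened to a u32
 * flags word so future behavioural bits can be threaded through the
 * same call chain without touching every caller again.
 */
#include <stdint.h>
#include <stdio.h>

#define SCA_CHECK 0x01	/* mirrors the flag the commit adds in sched.h */

/* Old shape: one dedicated boolean. */
static int set_affinity_old(int cpu, int check)
{
	if (check && cpu < 0)
		return -1;
	return 0;
}

/* New shape: a flags word; SCA_CHECK reproduces the old 'check'. */
static int set_affinity_new(int cpu, uint32_t flags)
{
	if ((flags & SCA_CHECK) && cpu < 0)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", set_affinity_old(-1, 1));	 /* -1: old API refuses */
	printf("%d\n", set_affinity_new(-1, SCA_CHECK)); /* -1: same check via flag */
	printf("%d\n", set_affinity_new(-1, 0));	 /* 0: check not requested */
	return 0;
}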
28 changes: 18 additions & 10 deletions kernel/sched/core.c
@@ -1824,13 +1824,14 @@ static int migration_cpu_stop(void *data)
  * sched_class::set_cpus_allowed must do the below, but is not required to
  * actually call this function.
  */
-void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
+void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
 {
 	cpumask_copy(&p->cpus_mask, new_mask);
 	p->nr_cpus_allowed = cpumask_weight(new_mask);
 }
 
-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+static void
+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags)
 {
 	struct rq *rq = task_rq(p);
 	bool queued, running;
@@ -1851,14 +1852,19 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 	if (running)
 		put_prev_task(rq, p);
 
-	p->sched_class->set_cpus_allowed(p, new_mask);
+	p->sched_class->set_cpus_allowed(p, new_mask, flags);
 
 	if (queued)
 		enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
 	if (running)
 		set_next_task(rq, p);
 }
 
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+	__do_set_cpus_allowed(p, new_mask, 0);
+}
+
 /*
  * Change a given task's CPU affinity. Migrate the thread to a
  * proper CPU and schedule it away if the CPU it's executing on
@@ -1869,7 +1875,8 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
  * call is not atomic; no spinlocks may be held.
  */
 static int __set_cpus_allowed_ptr(struct task_struct *p,
-				  const struct cpumask *new_mask, bool check)
+				  const struct cpumask *new_mask,
+				  u32 flags)
 {
 	const struct cpumask *cpu_valid_mask = cpu_active_mask;
 	unsigned int dest_cpu;
@@ -1891,7 +1898,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 	 * Must re-check here, to close a race against __kthread_bind(),
 	 * sched_setaffinity() is not guaranteed to observe the flag.
 	 */
-	if (check && (p->flags & PF_NO_SETAFFINITY)) {
+	if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1910,7 +1917,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 		goto out;
 	}
 
-	do_set_cpus_allowed(p, new_mask);
+	__do_set_cpus_allowed(p, new_mask, flags);
 
 	if (p->flags & PF_KTHREAD) {
 		/*
@@ -1947,7 +1954,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 
 int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 {
-	return __set_cpus_allowed_ptr(p, new_mask, false);
+	return __set_cpus_allowed_ptr(p, new_mask, 0);
 }
 EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
 
@@ -2406,7 +2413,8 @@ void sched_set_stop_task(int cpu, struct task_struct *stop)
 #else
 
 static inline int __set_cpus_allowed_ptr(struct task_struct *p,
-					 const struct cpumask *new_mask, bool check)
+					 const struct cpumask *new_mask,
+					 u32 flags)
 {
 	return set_cpus_allowed_ptr(p, new_mask);
 }
@@ -6006,7 +6014,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	}
 #endif
 again:
-	retval = __set_cpus_allowed_ptr(p, new_mask, true);
+	retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK);
 
 	if (!retval) {
 		cpuset_cpus_allowed(p, cpus_allowed);
@@ -6590,7 +6598,7 @@ void init_idle(struct task_struct *idle, int cpu)
 	 *
 	 * And since this is boot we can forgo the serialization.
 	 */
-	set_cpus_allowed_common(idle, cpumask_of(cpu));
+	set_cpus_allowed_common(idle, cpumask_of(cpu), 0);
 #endif
 	/*
 	 * We're having a chicken and egg problem, even though we are
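One detail of the core.c hunks worth noting: the pre-existing entry points keep their two-argument signatures and simply forward a zero flags word, so existing callers, including modules using the exported set_cpus_allowed_ptr(), are untouched; only internal callers see the flags. A hedged sketch of that compatibility-wrapper pattern, with illustrative names:

#include <stdint.h>

/* Flag-aware variant stays private to the translation unit. */
static int __set_affinity(int cpu, uint32_t flags)
{
	(void)flags;	/* bits such as SCA_CHECK would be honoured here */
	return cpu >= 0 ? 0 : -1;
}

/* Public entry point: signature unchanged, forwards flags == 0. */
int set_affinity(int cpu)
{
	return __set_affinity(cpu, 0);
}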
5 changes: 3 additions & 2 deletions kernel/sched/deadline.c
@@ -2301,7 +2301,8 @@ static void task_woken_dl(struct rq *rq, struct task_struct *p)
 }
 
 static void set_cpus_allowed_dl(struct task_struct *p,
-				const struct cpumask *new_mask)
+				const struct cpumask *new_mask,
+				u32 flags)
 {
 	struct root_domain *src_rd;
 	struct rq *rq;
@@ -2330,7 +2331,7 @@ static void set_cpus_allowed_dl(struct task_struct *p,
 		raw_spin_unlock(&src_dl_b->lock);
 	}
 
-	set_cpus_allowed_common(p, new_mask);
+	set_cpus_allowed_common(p, new_mask, flags);
 }
 
 /* Assumes rq->lock is held */
7 changes: 5 additions & 2 deletions kernel/sched/sched.h
@@ -1814,7 +1814,8 @@ struct sched_class {
 	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
-				 const struct cpumask *newmask);
+				 const struct cpumask *newmask,
+				 u32 flags);
 
 	void (*rq_online)(struct rq *rq);
 	void (*rq_offline)(struct rq *rq);
@@ -1907,7 +1908,9 @@ extern void update_group_capacity(struct sched_domain *sd, int cpu);
 
 extern void trigger_load_balance(struct rq *rq);
 
-extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
+#define SCA_CHECK		0x01
+
+extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
 
 #endif
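The sched.h hunk is where the new contract lives: every sched_class::set_cpus_allowed implementation now receives the flags word, and SCA_CHECK is the first and, in this commit, only bit defined. The deadline.c hunk above shows the shape each class follows: do any class-specific work, then forward flags unchanged to the common helper. A hedged sketch of that callback contract, again with illustrative names rather than kernel code:

#include <stdint.h>

#define SCA_CHECK 0x01

struct class_ops {
	void (*set_allowed)(int cpu, uint32_t flags);
};

/* Shared bookkeeping every class delegates to. */
static void set_allowed_common(int cpu, uint32_t flags)
{
	(void)cpu;
	(void)flags;
}

/* A class does its own work first, then forwards flags unchanged. */
static void set_allowed_dl(int cpu, uint32_t flags)
{
	/* class-specific bookkeeping would go here */
	set_allowed_common(cpu, flags);
}

static const struct class_ops dl_ops = { .set_allowed = set_allowed_dl };

int main(void)
{
	dl_ops.set_allowed(0, SCA_CHECK);
	return 0;
}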
