Merge tag 'sched-urgent-2022-08-06' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Various fixes: a deadline scheduler fix, a migration fix, a Sparse fix
  and a comment fix"

* tag 'sched-urgent-2022-08-06' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/core: Do not requeue task on CPU excluded from cpus_mask
  sched/rt: Fix Sparse warnings due to undefined rt.c declarations
  exit: Fix typo in comment: s/sub-theads/sub-threads
  sched, cpuset: Fix dl_cpu_busy() panic due to empty cs->cpus_allowed
Linus Torvalds committed Aug 7, 2022
2 parents 592d836 + 751d4cb commit cac03ac
Showing 5 changed files with 18 additions and 11 deletions.
2 changes: 1 addition & 1 deletion include/linux/sched.h
@@ -1825,7 +1825,7 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags)
 }
 
 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
-extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
+extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
 #ifdef CONFIG_SMP
 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
2 changes: 1 addition & 1 deletion kernel/cgroup/cpuset.c
@@ -2239,7 +2239,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
 		goto out_unlock;
 
 	cgroup_taskset_for_each(task, css, tset) {
-		ret = task_can_attach(task, cs->cpus_allowed);
+		ret = task_can_attach(task, cs->effective_cpus);
 		if (ret)
 			goto out_unlock;
 		ret = security_task_setscheduler(task);
2 changes: 1 addition & 1 deletion kernel/exit.c
@@ -1051,7 +1051,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 	 * p->signal fields because the whole thread group is dead
 	 * and nobody can change them.
 	 *
-	 * psig->stats_lock also protects us from our sub-theads
+	 * psig->stats_lock also protects us from our sub-threads
	 * which can reap other children at the same time. Until
	 * we change k_getrusage()-like users to rely on this lock
	 * we have to take ->siglock as well.
16 changes: 11 additions & 5 deletions kernel/sched/core.c
@@ -3802,7 +3802,7 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 }
 
-static inline bool ttwu_queue_cond(int cpu)
+static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
 {
 	/*
 	 * Do not complicate things with the async wake_list while the CPU is
@@ -3811,6 +3811,10 @@ static inline bool ttwu_queue_cond(int cpu)
 	if (!cpu_active(cpu))
 		return false;
 
+	/* Ensure the task will still be allowed to run on the CPU. */
+	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+		return false;
+
 	/*
 	 * If the CPU does not share cache, then queue the task on the
 	 * remote rqs wakelist to avoid accessing remote data.
@@ -3840,7 +3844,7 @@ static inline bool ttwu_queue_cond(int cpu)
 
 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
 {
-	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu)) {
+	if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
 		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
 		__ttwu_queue_wakelist(p, cpu, wake_flags);
 		return true;
@@ -9012,7 +9016,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
 }
 
 int task_can_attach(struct task_struct *p,
-		    const struct cpumask *cs_cpus_allowed)
+		    const struct cpumask *cs_effective_cpus)
 {
 	int ret = 0;
 
@@ -9031,9 +9035,11 @@ int task_can_attach(struct task_struct *p,
 	}
 
 	if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span,
-					      cs_cpus_allowed)) {
-		int cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
+					      cs_effective_cpus)) {
+		int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus);
 
+		if (unlikely(cpu >= nr_cpu_ids))
+			return -EINVAL;
 		ret = dl_cpu_busy(cpu, p);
 	}
 
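For context on the last two hunks: cpumask_any_and() signals an empty intersection by returning a value >= nr_cpu_ids rather than a valid CPU number, so handing the unchecked result to dl_cpu_busy() indexes per-CPU state that does not exist. Below is a minimal userspace sketch of that contract, with a plain unsigned int standing in for struct cpumask and all names (NR_CPUS, any_and) hypothetical:

#include <stdio.h>

#define NR_CPUS 8			/* toy stand-in for nr_cpu_ids */

/*
 * Toy model of cpumask_any_and(): return the first CPU set in both
 * masks, or NR_CPUS when the intersection is empty.  The real helper
 * follows the same convention: any result >= nr_cpu_ids means "no CPU".
 */
static int any_and(unsigned int a, unsigned int b)
{
	unsigned int both = a & b;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (both & (1u << cpu))
			return cpu;
	return NR_CPUS;
}

int main(void)
{
	unsigned int active = 0x0f;	/* CPUs 0-3 online */
	unsigned int effective = 0x0;	/* empty cs->effective_cpus */
	int cpu = any_and(active, effective);

	if (cpu >= NR_CPUS) {		/* mirrors the new unlikely() guard */
		puts("empty intersection: bail out, don't index by cpu");
		return 1;
	}
	printf("would check deadline bandwidth on CPU %d\n", cpu);
	return 0;
}

The companion call-site change in cpuset.c fits the same picture: it passes cs->effective_cpus, the mask tasks actually run on, since cs->cpus_allowed can legitimately be empty under cgroup v2, and the new guard returns -EINVAL if even that intersection comes up empty.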
7 changes: 4 additions & 3 deletions kernel/sched/sched.h
@@ -481,9 +481,6 @@ extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
 extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
 
-extern void unregister_rt_sched_group(struct task_group *tg);
-extern void free_rt_sched_group(struct task_group *tg);
-extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
 extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 		struct sched_rt_entity *rt_se, int cpu,
 		struct sched_rt_entity *parent);
@@ -521,6 +518,10 @@ struct cfs_bandwidth { };
 
 #endif /* CONFIG_CGROUP_SCHED */
 
+extern void unregister_rt_sched_group(struct task_group *tg);
+extern void free_rt_sched_group(struct task_group *tg);
+extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
+
 /*
  * u64_u32_load/u64_u32_store
  *
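The relocation above is the Sparse fix: rt.c provides definitions (or stubs) for unregister_rt_sched_group() and friends in every configuration, but their prototypes were only visible under CONFIG_CGROUP_SCHED, so other builds compiled definitions with no prior declaration. A stand-alone toy (hypothetical CONFIG_FOO and do_thing(), not kernel code) that reproduces the same class of warning:

/*
 * sparse_demo.c - compile without -DCONFIG_FOO:
 *
 *   sparse sparse_demo.c
 *   gcc -Wmissing-prototypes -c sparse_demo.c
 *
 * Both tools warn that do_thing() lacks a previous declaration,
 * because its prototype hides behind a config this build did not set.
 * Moving the prototype outside the #ifdef, as the hunks above do for
 * the RT group-scheduling declarations, silences the warning.
 */
#ifdef CONFIG_FOO
void do_thing(void);		/* visible only when CONFIG_FOO is set */
#endif

void do_thing(void)		/* defined unconditionally */
{
}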
