Merge tag 'sched-core-2024-11-18' into loongarch-next
LoongArch architecture changes for 6.13 depend on the sched-core changes
(PREEMPT_LAZY) to completely support RT, so merge them to create a base.
Huacai Chen committed Nov 22, 2024
2 parents adc2186 + 771d271 commit 1b9bc42
Showing 42 changed files with 1,105 additions and 504 deletions.
arch/riscv/Kconfig: 1 addition, 0 deletions

@@ -39,6 +39,7 @@ config RISCV
 	select ARCH_HAS_MMIOWB
 	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 	select ARCH_HAS_PMEM_API
+	select ARCH_HAS_PREEMPT_LAZY
 	select ARCH_HAS_PREPARE_SYNC_CORE_CMD
 	select ARCH_HAS_PTE_DEVMAP if 64BIT && MMU
 	select ARCH_HAS_PTE_SPECIAL
arch/riscv/include/asm/thread_info.h: 6 additions, 4 deletions

@@ -107,19 +107,21 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
  * - pending work-to-be-done flags are in lowest half-word
  * - other flags in upper half-word(s)
  */
-#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
-#define TIF_SIGPENDING		2	/* signal pending */
-#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_NEED_RESCHED	0	/* rescheduling necessary */
+#define TIF_NEED_RESCHED_LAZY	1	/* Lazy rescheduling needed */
+#define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
+#define TIF_SIGPENDING		3	/* signal pending */
 #define TIF_RESTORE_SIGMASK	4	/* restore signal mask in do_signal() */
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
 #define TIF_NOTIFY_SIGNAL	9	/* signal notifications exist */
 #define TIF_UPROBE		10	/* uprobe breakpoint or singlestep */
 #define TIF_32BIT		11	/* compat-mode 32bit process */
 #define TIF_RISCV_V_DEFER_RESTORE	12	/* restore Vector before returning to user */

+#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
 #define _TIF_RISCV_V_DEFER_RESTORE	(1 << TIF_RISCV_V_DEFER_RESTORE)
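The block comment above requires pending-work flags to sit in the lowest half-word, and the renumbering keeps both reschedule bits there. A standalone sanity check of that invariant (userspace C; the bit numbers are copied from the hunk, the check itself is illustrative):

#include <assert.h>
#include <stdio.h>

/* New RISC-V numbering from the hunk above. */
#define TIF_NEED_RESCHED	0
#define TIF_NEED_RESCHED_LAZY	1
#define TIF_NOTIFY_RESUME	2
#define TIF_SIGPENDING		3
#define TIF_NOTIFY_SIGNAL	9
#define TIF_UPROBE		10

int main(void)
{
	/* Work-to-be-done flags must fit in bits 0-15. */
	const int work_bits[] = { TIF_NEED_RESCHED, TIF_NEED_RESCHED_LAZY,
				  TIF_NOTIFY_RESUME, TIF_SIGPENDING,
				  TIF_NOTIFY_SIGNAL, TIF_UPROBE };

	for (unsigned int i = 0; i < sizeof(work_bits) / sizeof(work_bits[0]); i++)
		assert(work_bits[i] < 16);
	printf("all pending-work flags fit in the low half-word\n");
	return 0;
}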
arch/x86/Kconfig: 1 addition, 0 deletions

@@ -93,6 +93,7 @@ config X86
 	select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
 	select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
 	select ARCH_HAS_PMEM_API if X86_64
+	select ARCH_HAS_PREEMPT_LAZY
 	select ARCH_HAS_PTE_DEVMAP if X86_64
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_HW_PTE_YOUNG
arch/x86/include/asm/thread_info.h: 4 additions, 2 deletions

@@ -87,8 +87,9 @@ struct thread_info {
 #define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
-#define TIF_SINGLESTEP		4	/* reenable singlestep on user return */
-#define TIF_SSBD		5	/* Speculative store bypass disable */
+#define TIF_NEED_RESCHED_LAZY	4	/* Lazy rescheduling needed */
+#define TIF_SINGLESTEP		5	/* reenable singlestep on user return */
+#define TIF_SSBD		6	/* Speculative store bypass disable */
 #define TIF_SPEC_IB		9	/* Indirect branch speculation mitigation */
 #define TIF_SPEC_L1D_FLUSH	10	/* Flush L1D on mm switches (processes) */
 #define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */

@@ -110,6 +111,7 @@ struct thread_info {
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_SSBD		(1 << TIF_SSBD)
 #define _TIF_SPEC_IB		(1 << TIF_SPEC_IB)
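Because each _TIF_* mask is derived from its TIF_* bit number, renumbering TIF_SINGLESTEP and TIF_SSBD to make room for the lazy bit cannot desynchronize the masks; they follow automatically. A quick standalone demonstration (plain C, numbering copied from the hunk above):

#include <stdio.h>

/* New x86 numbering from the hunk above. */
#define TIF_NEED_RESCHED	3
#define TIF_NEED_RESCHED_LAZY	4
#define TIF_SINGLESTEP		5
#define TIF_SSBD		6

/* Masks are derived, so they track any renumbering of the bits. */
#define _TIF_NEED_RESCHED	(1UL << TIF_NEED_RESCHED)
#define _TIF_NEED_RESCHED_LAZY	(1UL << TIF_NEED_RESCHED_LAZY)
#define _TIF_SINGLESTEP		(1UL << TIF_SINGLESTEP)
#define _TIF_SSBD		(1UL << TIF_SSBD)

int main(void)
{
	printf("lazy=%#lx singlestep=%#lx ssbd=%#lx\n",
	       _TIF_NEED_RESCHED_LAZY, _TIF_SINGLESTEP, _TIF_SSBD);
	return 0;
}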
fs/exec.c: 1 addition, 1 deletion

@@ -990,7 +990,7 @@ static int exec_mmap(struct mm_struct *mm)
 	active_mm = tsk->active_mm;
 	tsk->active_mm = mm;
 	tsk->mm = mm;
-	mm_init_cid(mm);
+	mm_init_cid(mm, tsk);
 	/*
 	 * This prevents preemption while active_mm is being loaded and
 	 * it and mm are being updated, which could cause problems for
include/linux/entry-common.h: 2 additions, 1 deletion

@@ -64,7 +64,8 @@

 #define EXIT_TO_USER_MODE_WORK						\
 	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |		\
-	 _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL |	\
+	 _TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY |			\
+	 _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL |			\
 	 ARCH_EXIT_TO_USER_MODE_WORK)

 /**
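EXIT_TO_USER_MODE_WORK is the mask the generic entry code loops on before returning to user space, so adding _TIF_NEED_RESCHED_LAZY here is what makes a lazy reschedule request actually get serviced at the user-mode boundary. A toy userspace simulation of that loop shape (bit values and the loop body are illustrative, not the kernel's):

#include <stdio.h>

/* Illustrative bit values only. */
#define _TIF_SIGPENDING		(1UL << 0)
#define _TIF_NEED_RESCHED	(1UL << 1)
#define _TIF_NEED_RESCHED_LAZY	(1UL << 2)

#define EXIT_TO_USER_MODE_WORK \
	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

int main(void)
{
	unsigned long ti_work = _TIF_NEED_RESCHED_LAZY | _TIF_SIGPENDING;

	/* Keep working until no bit in the exit-work mask remains set. */
	while (ti_work & EXIT_TO_USER_MODE_WORK) {
		if (ti_work & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)) {
			printf("schedule()\n");
			ti_work &= ~(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY);
		}
		if (ti_work & _TIF_SIGPENDING) {
			printf("deliver signal\n");
			ti_work &= ~_TIF_SIGPENDING;
		}
	}
	printf("return to user mode\n");
	return 0;
}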
include/linux/entry-kvm.h: 3 additions, 2 deletions

@@ -17,8 +17,9 @@
 #endif

 #define XFER_TO_GUEST_MODE_WORK						\
-	(_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL |	\
-	 _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
+	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | _TIF_SIGPENDING |	\
+	 _TIF_NOTIFY_SIGNAL | _TIF_NOTIFY_RESUME |			\
+	 ARCH_XFER_TO_GUEST_MODE_WORK)

 struct kvm_vcpu;
include/linux/mm_types.h: 63 additions, 9 deletions

@@ -782,6 +782,7 @@ struct vm_area_struct {
 struct mm_cid {
 	u64 time;
 	int cid;
+	int recent_cid;
 };
 #endif

@@ -852,6 +853,27 @@ struct mm_struct {
 	 * When the next mm_cid scan is due (in jiffies).
 	 */
 	unsigned long mm_cid_next_scan;
+	/**
+	 * @nr_cpus_allowed: Number of CPUs allowed for mm.
+	 *
+	 * Number of CPUs allowed in the union of all mm's
+	 * threads allowed CPUs.
+	 */
+	unsigned int nr_cpus_allowed;
+	/**
+	 * @max_nr_cid: Maximum number of concurrency IDs allocated.
+	 *
+	 * Track the highest number of concurrency IDs allocated for the
+	 * mm.
+	 */
+	atomic_t max_nr_cid;
+	/**
+	 * @cpus_allowed_lock: Lock protecting mm cpus_allowed.
+	 *
+	 * Provide mutual exclusion for mm cpus_allowed and
+	 * mm nr_cpus_allowed updates.
+	 */
+	raw_spinlock_t cpus_allowed_lock;
 #endif
 #ifdef CONFIG_MMU
 	atomic_long_t pgtables_bytes;	/* size of all page tables */

@@ -1170,36 +1192,53 @@ static inline int mm_cid_clear_lazy_put(int cid)
 	return cid & ~MM_CID_LAZY_PUT;
 }

+/*
+ * mm_cpus_allowed: Union of all mm's threads allowed CPUs.
+ */
+static inline cpumask_t *mm_cpus_allowed(struct mm_struct *mm)
+{
+	unsigned long bitmap = (unsigned long)mm;
+
+	bitmap += offsetof(struct mm_struct, cpu_bitmap);
+	/* Skip cpu_bitmap */
+	bitmap += cpumask_size();
+	return (struct cpumask *)bitmap;
+}
+
 /* Accessor for struct mm_struct's cidmask. */
 static inline cpumask_t *mm_cidmask(struct mm_struct *mm)
 {
-	unsigned long cid_bitmap = (unsigned long)mm;
+	unsigned long cid_bitmap = (unsigned long)mm_cpus_allowed(mm);

-	cid_bitmap += offsetof(struct mm_struct, cpu_bitmap);
-	/* Skip cpu_bitmap */
+	/* Skip mm_cpus_allowed */
 	cid_bitmap += cpumask_size();
 	return (struct cpumask *)cid_bitmap;
 }

-static inline void mm_init_cid(struct mm_struct *mm)
+static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
 {
 	int i;

 	for_each_possible_cpu(i) {
 		struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i);

 		pcpu_cid->cid = MM_CID_UNSET;
+		pcpu_cid->recent_cid = MM_CID_UNSET;
 		pcpu_cid->time = 0;
 	}
+	mm->nr_cpus_allowed = p->nr_cpus_allowed;
+	atomic_set(&mm->max_nr_cid, 0);
+	raw_spin_lock_init(&mm->cpus_allowed_lock);
+	cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
 	cpumask_clear(mm_cidmask(mm));
 }

-static inline int mm_alloc_cid_noprof(struct mm_struct *mm)
+static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p)
 {
 	mm->pcpu_cid = alloc_percpu_noprof(struct mm_cid);
 	if (!mm->pcpu_cid)
 		return -ENOMEM;
-	mm_init_cid(mm);
+	mm_init_cid(mm, p);
 	return 0;
 }
 #define mm_alloc_cid(...)	alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__))

@@ -1212,16 +1251,31 @@ static inline void mm_destroy_cid(struct mm_struct *mm)

 static inline unsigned int mm_cid_size(void)
 {
-	return cpumask_size();
+	return 2 * cpumask_size();	/* mm_cpus_allowed(), mm_cidmask(). */
 }

+static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask)
+{
+	struct cpumask *mm_allowed = mm_cpus_allowed(mm);
+
+	if (!mm)
+		return;
+	/* The mm_cpus_allowed is the union of each thread allowed CPUs masks. */
+	raw_spin_lock(&mm->cpus_allowed_lock);
+	cpumask_or(mm_allowed, mm_allowed, cpumask);
+	WRITE_ONCE(mm->nr_cpus_allowed, cpumask_weight(mm_allowed));
+	raw_spin_unlock(&mm->cpus_allowed_lock);
+}
 #else /* CONFIG_SCHED_MM_CID */
-static inline void mm_init_cid(struct mm_struct *mm) { }
-static inline int mm_alloc_cid(struct mm_struct *mm) { return 0; }
+static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { }
+static inline int mm_alloc_cid(struct mm_struct *mm, struct task_struct *p) { return 0; }
 static inline void mm_destroy_cid(struct mm_struct *mm) { }

 static inline unsigned int mm_cid_size(void)
 {
 	return 0;
 }
+static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask) { }
 #endif /* CONFIG_SCHED_MM_CID */

 struct mmu_gather;
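Why mm_cid_size() doubles: struct mm_struct ends with the flexible cpu_bitmap, and this series appends two more cpumask-sized regions behind it, first the mm_cpus_allowed union mask and then the cidmask, which the accessors above locate by pointer arithmetic. A standalone toy model of that trailing-region layout (plain C; the struct, sizes, and names are illustrative only):

#include <stdio.h>
#include <stdlib.h>

#define CPUMASK_BYTES 8	/* assumption: up to 64 possible CPUs */

/* Toy stand-in for mm_struct: fixed header plus trailing bitmaps. */
struct toy_mm {
	int header;
	unsigned char cpu_bitmap[];	/* then cpus_allowed, then cidmask */
};

static unsigned char *toy_cpus_allowed(struct toy_mm *mm)
{
	return mm->cpu_bitmap + CPUMASK_BYTES;	/* skip cpu_bitmap */
}

static unsigned char *toy_cidmask(struct toy_mm *mm)
{
	return toy_cpus_allowed(mm) + CPUMASK_BYTES;	/* skip cpus_allowed */
}

int main(void)
{
	/* cpu_bitmap plus 2 * CPUMASK_BYTES extra, mirroring mm_cid_size()
	 * returning 2 * cpumask_size() for the two appended masks. */
	struct toy_mm *mm = calloc(1, sizeof(*mm) + 3 * CPUMASK_BYTES);

	if (!mm)
		return 1;
	toy_cpus_allowed(mm)[0] = 0x0f;	/* CPUs 0-3 allowed */
	printf("cpus_allowed offset %td, cidmask offset %td\n",
	       toy_cpus_allowed(mm) - mm->cpu_bitmap,
	       toy_cidmask(mm) - mm->cpu_bitmap);
	free(mm);
	return 0;
}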
include/linux/preempt.h: 7 additions, 1 deletion

@@ -486,6 +486,7 @@ DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
 extern bool preempt_model_none(void);
 extern bool preempt_model_voluntary(void);
 extern bool preempt_model_full(void);
+extern bool preempt_model_lazy(void);

 #else

@@ -502,6 +503,11 @@ static inline bool preempt_model_full(void)
 	return IS_ENABLED(CONFIG_PREEMPT);
 }

+static inline bool preempt_model_lazy(void)
+{
+	return IS_ENABLED(CONFIG_PREEMPT_LAZY);
+}
+
 #endif

 static inline bool preempt_model_rt(void)

@@ -519,7 +525,7 @@ static inline bool preempt_model_rt(void)
  */
 static inline bool preempt_model_preemptible(void)
 {
-	return preempt_model_full() || preempt_model_rt();
+	return preempt_model_full() || preempt_model_lazy() || preempt_model_rt();
 }

 #endif /* __LINUX_PREEMPT_H */
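The last hunk is the behavioral one: preempt_model_preemptible() now reports true under the lazy model, so code asking "may I be preempted here?" treats PREEMPT_LAZY like a preemptible kernel. A tiny runnable sketch of the query pattern, with the three predicates reduced to hard-coded booleans for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the Kconfig-driven predicates; this build "selects" lazy. */
static bool preempt_model_full(void) { return false; }
static bool preempt_model_lazy(void) { return true; }	/* CONFIG_PREEMPT_LAZY */
static bool preempt_model_rt(void)   { return false; }

static bool preempt_model_preemptible(void)
{
	/* Same composition as the patched kernel helper above. */
	return preempt_model_full() || preempt_model_lazy() || preempt_model_rt();
}

int main(void)
{
	printf("preemptible: %s\n", preempt_model_preemptible() ? "yes" : "no");
	return 0;
}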
include/linux/sched.h: 3 additions, 2 deletions

@@ -1898,7 +1898,7 @@ extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];

 #ifdef CONFIG_THREAD_INFO_IN_TASK
 # define task_thread_info(task)	(&(task)->thread_info)
-#elif !defined(__HAVE_THREAD_FUNCTIONS)
+#else
 # define task_thread_info(task)	((struct thread_info *)(task)->stack)
 #endif

@@ -2002,7 +2002,8 @@ static inline void set_tsk_need_resched(struct task_struct *tsk)

 static inline void clear_tsk_need_resched(struct task_struct *tsk)
 {
-	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+	atomic_long_andnot(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY,
+			   (atomic_long_t *)&task_thread_info(tsk)->flags);
 }

 static inline int test_tsk_need_resched(struct task_struct *tsk)
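Note on clear_tsk_need_resched(): it now has two bits to drop, and it does so with a single atomic AND-NOT rather than two separate bit-clears, so no observer can see the pair half-cleared. A standalone userspace analogue using C11 atomics (bit values illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define _TIF_NEED_RESCHED	(1UL << 0)	/* illustrative bit values */
#define _TIF_NEED_RESCHED_LAZY	(1UL << 1)

int main(void)
{
	atomic_ulong flags = _TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | (1UL << 5);

	/* One atomic read-modify-write drops both resched bits at once,
	 * mirroring the atomic_long_andnot() in clear_tsk_need_resched(). */
	atomic_fetch_and(&flags, ~(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY));
	printf("flags now %#lx\n", (unsigned long)atomic_load(&flags));	/* 0x20 */
	return 0;
}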
include/linux/sched/ext.h: 0 additions, 1 deletion

@@ -199,7 +199,6 @@ struct sched_ext_entity {
 #ifdef CONFIG_EXT_GROUP_SCHED
 	struct cgroup *cgrp_moving_from;
 #endif
-	/* must be the last field, see init_scx_entity() */
 	struct list_head tasks_node;
 };
include/linux/sched/task_stack.h: 1 addition, 1 deletion

@@ -34,7 +34,7 @@ static __always_inline unsigned long *end_of_stack(const struct task_struct *tas
 #endif
 }

-#elif !defined(__HAVE_THREAD_FUNCTIONS)
+#else

 #define task_stack_page(task)	((void *)(task)->stack)
include/linux/thread_info.h: 17 additions, 4 deletions

@@ -59,6 +59,14 @@ enum syscall_work_bit {

 #include <asm/thread_info.h>

+#ifndef TIF_NEED_RESCHED_LAZY
+#ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
+#error Inconsistent PREEMPT_LAZY
+#endif
+#define TIF_NEED_RESCHED_LAZY TIF_NEED_RESCHED
+#define _TIF_NEED_RESCHED_LAZY _TIF_NEED_RESCHED
+#endif
+
 #ifdef __KERNEL__

 #ifndef arch_set_restart_data

@@ -179,22 +187,27 @@ static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti

 #ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H

-static __always_inline bool tif_need_resched(void)
+static __always_inline bool tif_test_bit(int bit)
 {
-	return arch_test_bit(TIF_NEED_RESCHED,
+	return arch_test_bit(bit,
			     (unsigned long *)(&current_thread_info()->flags));
 }

 #else

-static __always_inline bool tif_need_resched(void)
+static __always_inline bool tif_test_bit(int bit)
 {
-	return test_bit(TIF_NEED_RESCHED,
+	return test_bit(bit,
			(unsigned long *)(&current_thread_info()->flags));
 }

 #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */

+static __always_inline bool tif_need_resched(void)
+{
+	return tif_test_bit(TIF_NEED_RESCHED);
+}
+
 #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
 static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
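With the test factored through tif_test_bit(), a lazy-aware companion query is the natural follow-on; only tif_need_resched() is visible in this hunk, so the helper below is a hypothetical sketch, mocked up as runnable userspace C (IS_ENABLED, the flag word, and the bit numbers are local stand-ins, not taken from this diff):

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_PREEMPT_LAZY 1	/* assumption: lazy model enabled */
#define IS_ENABLED(x) (x)	/* crude stand-in for the kernel macro */
#define TIF_NEED_RESCHED	0
#define TIF_NEED_RESCHED_LAZY	1

static unsigned long thread_flags = 1UL << TIF_NEED_RESCHED_LAZY;

static bool tif_test_bit(int bit)
{
	return thread_flags & (1UL << bit);
}

static bool tif_need_resched(void)
{
	return tif_test_bit(TIF_NEED_RESCHED);
}

/* Hypothetical lazy companion, compiled away when lazy is not configured. */
static bool tif_need_resched_lazy(void)
{
	return IS_ENABLED(CONFIG_PREEMPT_LAZY) &&
	       tif_test_bit(TIF_NEED_RESCHED_LAZY);
}

int main(void)
{
	printf("need_resched=%d lazy=%d\n",
	       (int)tif_need_resched(), (int)tif_need_resched_lazy());
	return 0;
}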
[The remaining 29 of the 42 changed files were not loaded on this page.]
