Revert "arm64: initialize per-cpu offsets earlier"

This reverts commit 353e228.

Qian Cai reports that TX2 no longer boots with his .config as it appears
that task_cpu() gets instrumented and used before KASAN has been
initialised.

Although Mark has a proposed fix, let's take the safe option of reverting
this for now and sorting it out properly later.

Link: https://lore.kernel.org/r/711bc57a314d8d646b41307008db2845b7537b3d.camel@redhat.com
Reported-by: Qian Cai <cai@redhat.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
Will Deacon committed Oct 9, 2020
1 parent a82e4ef commit d13027b
Showing 4 changed files with 11 additions and 19 deletions.
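
Why the now-reverted ordering appeared to break boot: __primary_switched called init_this_cpu_offset() (which reads task_cpu(current)) before kasan_early_init, so on a .config where task_cpu() picks up KASAN instrumentation, the very first load went through shadow-memory checks before any shadow mapping existed. Below is a minimal, hypothetical userspace sketch of that hazard; every name in it is an illustrative stand-in, not a kernel API.

#include <assert.h>
#include <stdio.h>

/* Stand-in for the KASAN shadow mapping set up by early init. */
static int kasan_shadow_ready;

/* A load as instrumented code performs it: shadow check, then access. */
static int instrumented_read(const int *p)
{
	assert(kasan_shadow_ready && "shadow access before KASAN init");
	return *p;
}

struct task { int cpu; };
static struct task boot_task;		/* analogue of the boot CPU's 'current' */

/* Analogue of task_cpu(current): ordinary code the compiler instruments. */
static int task_cpu_sketch(const struct task *t)
{
	return instrumented_read(&t->cpu);
}

int main(void)
{
	/*
	 * The reverted commit ran the equivalent of this call before the
	 * KASAN analogue below; uncommenting it models the boot failure:
	 */
	/* task_cpu_sketch(&boot_task); */

	kasan_shadow_ready = 1;		/* kasan_early_init() analogue */
	printf("boot cpu = %d\n", task_cpu_sketch(&boot_task));
	return 0;
}

After the revert, the per-cpu offset is set up again in C code (smp_setup_processor_id(), smp_prepare_boot_cpu() and secondary_start_kernel()), after the early KASAN initialisation in head.S has run.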
2 changes: 0 additions & 2 deletions arch/arm64/include/asm/cpu.h
@@ -68,6 +68,4 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info);
 void update_cpu_features(int cpu, struct cpuinfo_arm64 *info,
 			 struct cpuinfo_arm64 *boot);
 
-void init_this_cpu_offset(void);
-
 #endif /* __ASM_CPU_H */
3 changes: 0 additions & 3 deletions arch/arm64/kernel/head.S
@@ -448,8 +448,6 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 	bl	__pi_memset
 	dsb	ishst				// Make zero page visible to PTW
 
-	bl	init_this_cpu_offset
-
 #ifdef CONFIG_KASAN
 	bl	kasan_early_init
 #endif
@@ -756,7 +754,6 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
 	ptrauth_keys_init_cpu	x2, x3, x4, x5
 #endif
 
-	bl	init_this_cpu_offset
 	b	secondary_start_kernel
 SYM_FUNC_END(__secondary_switched)
 
12 changes: 6 additions & 6 deletions arch/arm64/kernel/setup.c
@@ -87,6 +87,12 @@ void __init smp_setup_processor_id(void)
 	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
 	set_cpu_logical_map(0, mpidr);
 
+	/*
+	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
+	 * using percpu variable early, for example, lockdep will
+	 * access percpu variable inside lock_release
+	 */
+	set_my_cpu_offset(0);
 	pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
 		(unsigned long)mpidr, read_cpuid_id());
 }
@@ -276,12 +282,6 @@ u64 cpu_logical_map(int cpu)
 }
 EXPORT_SYMBOL_GPL(cpu_logical_map);
 
-void noinstr init_this_cpu_offset(void)
-{
-	unsigned int cpu = task_cpu(current);
-	set_my_cpu_offset(per_cpu_offset(cpu));
-}
-
 void __init __no_sanitize_address setup_arch(char **cmdline_p)
 {
 	init_mm.start_code = (unsigned long) _text;
13 changes: 5 additions & 8 deletions arch/arm64/kernel/smp.c
@@ -192,7 +192,10 @@ asmlinkage notrace void secondary_start_kernel(void)
 	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
 	struct mm_struct *mm = &init_mm;
 	const struct cpu_operations *ops;
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu;
+
+	cpu = task_cpu(current);
+	set_my_cpu_offset(per_cpu_offset(cpu));
 
 	/*
 	 * All kernel threads share the same mm context; grab a
@@ -432,13 +435,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void __init smp_prepare_boot_cpu(void)
 {
-	/*
-	 * Now that setup_per_cpu_areas() has allocated the runtime per-cpu
-	 * areas it is only safe to read the CPU0 boot-time area, and we must
-	 * reinitialize the offset to point to the runtime area.
-	 */
-	init_this_cpu_offset();
-
+	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 	cpuinfo_store_boot_cpu();
 
 	/*
