x86/resctrl: Add resctrl_arch_ prefix to pseudo lock functions
resctrl's pseudo lock has some copy-to-cache and measurement functions that
are micro-architecture specific.

For example, pseudo_lock_fn() is not at all portable.

Label these 'resctrl_arch_' so they stay under /arch/x86.  To expose these
functions to the filesystem code they need an entry in a header file, and
can't be marked static.

Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Shaopeng Tan <tan.shaopeng@jp.fujitsu.com>
Reviewed-by: Tony Luck <tony.luck@intel.com>
Reviewed-by: Reinette Chatre <reinette.chatre@intel.com>
Reviewed-by: Fenghua Yu <fenghuay@nvidia.com>
Reviewed-by: Babu Moger <babu.moger@amd.com>
Tested-by: Carl Worth <carl@os.amperecomputing.com> # arm64
Tested-by: Shaopeng Tan <tan.shaopeng@jp.fujitsu.com>
Tested-by: Peter Newman <peternewman@google.com>
Tested-by: Amit Singh Tomar <amitsinght@marvell.com> # arm64
Tested-by: Shanker Donthineni <sdonthineni@nvidia.com> # arm64
Tested-by: Babu Moger <babu.moger@amd.com>
Link: https://lore.kernel.org/r/20250311183715.16445-24-james.morse@arm.com
James Morse authored and Borislav Petkov (AMD) committed on Mar 12, 2025
Parent: c32a7d7 · Commit: 7d0ec14
Showing 2 changed files with 24 additions and 17 deletions.
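
The resulting split is easiest to see at a call site. A minimal sketch, assuming the filesystem code is later built outside arch/x86 (the caller below is hypothetical; the declarations and the kthread_run_on_cpu() call are taken from the hunks that follow):

	/* arch/x86/include/asm/resctrl.h: the arch interface added by this commit */
	u64 resctrl_arch_get_prefetch_disable_bits(void);
	int resctrl_arch_pseudo_lock_fn(void *_rdtgrp);

	/*
	 * Hypothetical generic caller, compiled against the header only.
	 * The locking thread must run on a CPU of the target cache domain,
	 * hence kthread_run_on_cpu() rather than a direct call.
	 */
	if (resctrl_arch_get_prefetch_disable_bits() == 0)
		return -EINVAL;	/* no validated prefetch-disable bits */
	thread = kthread_run_on_cpu(resctrl_arch_pseudo_lock_fn, rdtgrp,
				    plr->cpu, "pseudo_lock/%u");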
5 changes: 5 additions & 0 deletions arch/x86/include/asm/resctrl.h
@@ -203,6 +203,11 @@ static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid
 static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid,
 					     void *ctx) { };
 
+u64 resctrl_arch_get_prefetch_disable_bits(void);
+int resctrl_arch_pseudo_lock_fn(void *_rdtgrp);
+int resctrl_arch_measure_cycles_lat_fn(void *_plr);
+int resctrl_arch_measure_l2_residency(void *_plr);
+int resctrl_arch_measure_l3_residency(void *_plr);
 void resctrl_cpu_detect(struct cpuinfo_x86 *c);
 
 #else
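
Nothing in this commit adds non-x86 counterparts, but the header contract implies what a stub for an architecture without pseudo-locking would look like. A hypothetical sketch, not part of this change:

	/*
	 * Hypothetical stub -- NOT in this commit. A zero return means
	 * "no validated prefetch-disable bits", which makes
	 * rdtgroup_locksetup_enter() print "Pseudo-locking not supported"
	 * and fail with -EINVAL (see the pseudo_lock.c hunk below).
	 */
	static inline u64 resctrl_arch_get_prefetch_disable_bits(void)
	{
		return 0;
	}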
36 changes: 19 additions & 17 deletions arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -61,7 +61,8 @@ static const struct class pseudo_lock_class = {
 };
 
 /**
- * get_prefetch_disable_bits - prefetch disable bits of supported platforms
+ * resctrl_arch_get_prefetch_disable_bits - prefetch disable bits of supported
+ * platforms
  * @void: It takes no parameters.
  *
  * Capture the list of platforms that have been validated to support
@@ -75,13 +76,13 @@ static const struct class pseudo_lock_class = {
  * in the SDM.
  *
  * When adding a platform here also add support for its cache events to
- * measure_cycles_perf_fn()
+ * resctrl_arch_measure_l*_residency()
  *
  * Return:
  * If platform is supported, the bits to disable hardware prefetchers, 0
  * if platform is not supported.
  */
-static u64 get_prefetch_disable_bits(void)
+u64 resctrl_arch_get_prefetch_disable_bits(void)
 {
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
 	    boot_cpu_data.x86 != 6)
@@ -408,7 +409,7 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp)
 }
 
 /**
- * pseudo_lock_fn - Load kernel memory into cache
+ * resctrl_arch_pseudo_lock_fn - Load kernel memory into cache
  * @_rdtgrp: resource group to which pseudo-lock region belongs
  *
  * This is the core pseudo-locking flow.
@@ -426,7 +427,7 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp)
  *
  * Return: 0. Waiter on waitqueue will be woken on completion.
  */
-static int pseudo_lock_fn(void *_rdtgrp)
+int resctrl_arch_pseudo_lock_fn(void *_rdtgrp)
 {
 	struct rdtgroup *rdtgrp = _rdtgrp;
 	struct pseudo_lock_region *plr = rdtgrp->plr;
@@ -712,7 +713,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
 	 * Not knowing the bits to disable prefetching implies that this
 	 * platform does not support Cache Pseudo-Locking.
 	 */
-	prefetch_disable_bits = get_prefetch_disable_bits();
+	prefetch_disable_bits = resctrl_arch_get_prefetch_disable_bits();
 	if (prefetch_disable_bits == 0) {
 		rdt_last_cmd_puts("Pseudo-locking not supported\n");
 		return -EINVAL;
@@ -872,7 +873,8 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
 }
 
 /**
- * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory
+ * resctrl_arch_measure_cycles_lat_fn - Measure cycle latency to read
+ * pseudo-locked memory
  * @_plr: pseudo-lock region to measure
  *
  * There is no deterministic way to test if a memory region is cached. One
@@ -885,7 +887,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
  *
  * Return: 0. Waiter on waitqueue will be woken on completion.
  */
-static int measure_cycles_lat_fn(void *_plr)
+int resctrl_arch_measure_cycles_lat_fn(void *_plr)
 {
 	struct pseudo_lock_region *plr = _plr;
 	u32 saved_low, saved_high;
@@ -1069,7 +1071,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
 	return 0;
 }
 
-static int measure_l2_residency(void *_plr)
+int resctrl_arch_measure_l2_residency(void *_plr)
 {
 	struct pseudo_lock_region *plr = _plr;
 	struct residency_counts counts = {0};
@@ -1107,7 +1109,7 @@ static int measure_l2_residency(void *_plr)
 	return 0;
 }
 
-static int measure_l3_residency(void *_plr)
+int resctrl_arch_measure_l3_residency(void *_plr)
 {
 	struct pseudo_lock_region *plr = _plr;
 	struct residency_counts counts = {0};
@@ -1205,14 +1207,14 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
 	plr->cpu = cpu;
 
 	if (sel == 1)
-		thread = kthread_run_on_cpu(measure_cycles_lat_fn, plr,
-					    cpu, "pseudo_lock_measure/%u");
+		thread = kthread_run_on_cpu(resctrl_arch_measure_cycles_lat_fn,
+					    plr, cpu, "pseudo_lock_measure/%u");
 	else if (sel == 2)
-		thread = kthread_run_on_cpu(measure_l2_residency, plr,
-					    cpu, "pseudo_lock_measure/%u");
+		thread = kthread_run_on_cpu(resctrl_arch_measure_l2_residency,
+					    plr, cpu, "pseudo_lock_measure/%u");
 	else if (sel == 3)
-		thread = kthread_run_on_cpu(measure_l3_residency, plr,
-					    cpu, "pseudo_lock_measure/%u");
+		thread = kthread_run_on_cpu(resctrl_arch_measure_l3_residency,
+					    plr, cpu, "pseudo_lock_measure/%u");
 	else
 		goto out;
 
@@ -1307,7 +1309,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
 
 	plr->thread_done = 0;
 
-	thread = kthread_run_on_cpu(pseudo_lock_fn, rdtgrp,
+	thread = kthread_run_on_cpu(resctrl_arch_pseudo_lock_fn, rdtgrp,
 				    plr->cpu, "pseudo_lock/%u");
 	if (IS_ERR(thread)) {
 		ret = PTR_ERR(thread);
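
All four renamed functions keep the int (*)(void *) shape because they are kthread entry points rather than ordinary calls: each must execute on a CPU associated with the cache being locked or measured. The calling pattern, repeated throughout the hunks above (kthread_run_on_cpu() is the stock kernel API; plr and cpu come from the surrounding function):

	struct task_struct *thread;

	thread = kthread_run_on_cpu(resctrl_arch_measure_l2_residency,
				    plr, cpu, "pseudo_lock_measure/%u");
	if (IS_ERR(thread))
		ret = PTR_ERR(thread);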
