x86/resctrl: Rename the RDT functions and definitions
As AMD is starting to support RESCTRL features, rename the RDT functions
and definitions to more generic names.

Replace "intel_rdt" with "resctrl" where applicable.

Signed-off-by: Babu Moger <babu.moger@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: "Chang S. Bae" <chang.seok.bae@intel.com>
Cc: David Miller <davem@davemloft.net>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Dmitry Safonov <dima@arista.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Kate Stewart <kstewart@linuxfoundation.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: <linux-doc@vger.kernel.org>
Cc: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Philippe Ombredanne <pombredanne@nexb.com>
Cc: Pu Wen <puwen@hygon.cn>
Cc: <qianyue.zj@alibaba-inc.com>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Cc: Reinette Chatre <reinette.chatre@intel.com>
Cc: Rian Hunter <rian@alum.mit.edu>
Cc: Sherry Hurwitz <sherry.hurwitz@amd.com>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Lendacky <Thomas.Lendacky@amd.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: <xiaochen.shen@intel.com>
Link: https://lkml.kernel.org/r/20181121202811.4492-3-babu.moger@amd.com
Babu Moger authored and Borislav Petkov committed Nov 22, 2018
1 parent fa7d949 commit 352940e
Showing 7 changed files with 40 additions and 39 deletions.
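
The principal renames, as the diff below shows, are:

  __intel_rdt_sched_in() / intel_rdt_sched_in()     ->  __resctrl_sched_in() / resctrl_sched_in()
  struct intel_pqr_state                            ->  struct resctrl_pqr_state
  intel_cqm_threshold                               ->  resctrl_cqm_threshold
  intel_rdt_online_cpu() / intel_rdt_offline_cpu()  ->  resctrl_online_cpu() / resctrl_offline_cpu()
  intel_rdt_late_init() / intel_rdt_exit()          ->  resctrl_late_init() / resctrl_exit()
  _ASM_X86_INTEL_RDT_SCHED_H (header guard)         ->  _ASM_X86_RESCTRL_SCHED_H
  "x86/rdt/cat:online:" (cpuhp state name)          ->  "x86/resctrl/cat:online:"
  pr_fmt() prefix KBUILD_MODNAME                    ->  "resctrl: "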
24 changes: 12 additions & 12 deletions arch/x86/include/asm/resctrl_sched.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_INTEL_RDT_SCHED_H
-#define _ASM_X86_INTEL_RDT_SCHED_H
+#ifndef _ASM_X86_RESCTRL_SCHED_H
+#define _ASM_X86_RESCTRL_SCHED_H
 
 #ifdef CONFIG_INTEL_RDT
 
@@ -10,7 +10,7 @@
 #define IA32_PQR_ASSOC	0x0c8f
 
 /**
- * struct intel_pqr_state - State cache for the PQR MSR
+ * struct resctrl_pqr_state - State cache for the PQR MSR
  * @cur_rmid:		The cached Resource Monitoring ID
  * @cur_closid:	The cached Class Of Service ID
  * @default_rmid:	The user assigned Resource Monitoring ID
@@ -24,21 +24,21 @@
  * The cache also helps to avoid pointless updates if the value does
  * not change.
  */
-struct intel_pqr_state {
+struct resctrl_pqr_state {
 	u32			cur_rmid;
 	u32			cur_closid;
 	u32			default_rmid;
 	u32			default_closid;
 };
 
-DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
+DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);
 
 DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
 DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
 DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
 
 /*
- * __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
+ * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
  *
  * Following considerations are made so that this has minimal impact
  * on scheduler hot path:
@@ -51,9 +51,9 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
  * simple as possible.
  * Must be called with preemption disabled.
  */
-static void __intel_rdt_sched_in(void)
+static void __resctrl_sched_in(void)
 {
-	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
 	u32 closid = state->default_closid;
 	u32 rmid = state->default_rmid;
 
@@ -78,16 +78,16 @@ static void __intel_rdt_sched_in(void)
 	}
 }
 
-static inline void intel_rdt_sched_in(void)
+static inline void resctrl_sched_in(void)
 {
 	if (static_branch_likely(&rdt_enable_key))
-		__intel_rdt_sched_in();
+		__resctrl_sched_in();
 }
 
 #else
 
-static inline void intel_rdt_sched_in(void) {}
+static inline void resctrl_sched_in(void) {}
 
 #endif /* CONFIG_INTEL_RDT */
 
-#endif /* _ASM_X86_INTEL_RDT_SCHED_H */
+#endif /* _ASM_X86_RESCTRL_SCHED_H */
28 changes: 14 additions & 14 deletions arch/x86/kernel/cpu/resctrl/core.c
@@ -22,7 +22,7 @@
  * Software Developer Manual June 2016, volume 3, section 17.17.
  */
 
-#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt)	"resctrl: " fmt
 
 #include <linux/slab.h>
 #include <linux/err.h>
@@ -40,12 +40,12 @@
 DEFINE_MUTEX(rdtgroup_mutex);
 
 /*
- * The cached intel_pqr_state is strictly per CPU and can never be
+ * The cached resctrl_pqr_state is strictly per CPU and can never be
  * updated from a remote CPU. Functions which modify the state
  * are called with interrupts disabled and no preemption, which
  * is sufficient for the protection.
  */
-DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
+DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);
 
 /*
  * Used to store the max resource name width and max resource data width
@@ -639,7 +639,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
 
 static void clear_closid_rmid(int cpu)
 {
-	struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
 
 	state->default_closid = 0;
 	state->default_rmid = 0;
@@ -648,7 +648,7 @@ static void clear_closid_rmid(int cpu)
 	wrmsr(IA32_PQR_ASSOC, 0, 0);
 }
 
-static int intel_rdt_online_cpu(unsigned int cpu)
+static int resctrl_online_cpu(unsigned int cpu)
 {
 	struct rdt_resource *r;
 
@@ -674,7 +674,7 @@ static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
 	}
 }
 
-static int intel_rdt_offline_cpu(unsigned int cpu)
+static int resctrl_offline_cpu(unsigned int cpu)
 {
 	struct rdtgroup *rdtgrp;
 	struct rdt_resource *r;
@@ -866,7 +866,7 @@ static __init bool get_rdt_resources(void)
 
 static enum cpuhp_state rdt_online;
 
-static int __init intel_rdt_late_init(void)
+static int __init resctrl_late_init(void)
 {
 	struct rdt_resource *r;
 	int state, ret;
@@ -877,8 +877,8 @@ static int __init intel_rdt_late_init(void)
 	rdt_init_padding();
 
 	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
-				  "x86/rdt/cat:online:",
-				  intel_rdt_online_cpu, intel_rdt_offline_cpu);
+				  "x86/resctrl/cat:online:",
+				  resctrl_online_cpu, resctrl_offline_cpu);
 	if (state < 0)
 		return state;
 
@@ -890,20 +890,20 @@ static int __init intel_rdt_late_init(void)
 	rdt_online = state;
 
 	for_each_alloc_capable_rdt_resource(r)
-		pr_info("Intel RDT %s allocation detected\n", r->name);
+		pr_info("%s allocation detected\n", r->name);
 
 	for_each_mon_capable_rdt_resource(r)
-		pr_info("Intel RDT %s monitoring detected\n", r->name);
+		pr_info("%s monitoring detected\n", r->name);
 
 	return 0;
 }
 
-late_initcall(intel_rdt_late_init);
+late_initcall(resctrl_late_init);
 
-static void __exit intel_rdt_exit(void)
+static void __exit resctrl_exit(void)
 {
 	cpuhp_remove_state(rdt_online);
 	rdtgroup_exit();
 }
 
-__exitcall(intel_rdt_exit);
+__exitcall(resctrl_exit);
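
Net effect of the pr_fmt() and pr_info() changes above: the boot-time messages lose their Intel-specific wording. Assuming an L3 resource as an example, a line that previously read along the lines of "... Intel RDT L3 allocation detected" now comes out as "resctrl: L3 allocation detected".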
2 changes: 1 addition & 1 deletion arch/x86/kernel/cpu/resctrl/internal.h
@@ -69,7 +69,7 @@ struct rmid_read {
 	u64			val;
 };
 
-extern unsigned int intel_cqm_threshold;
+extern unsigned int resctrl_cqm_threshold;
 extern bool rdt_alloc_capable;
 extern bool rdt_mon_capable;
 extern unsigned int rdt_mon_features;
11 changes: 6 additions & 5 deletions arch/x86/kernel/cpu/resctrl/monitor.c
@@ -73,7 +73,7 @@ unsigned int rdt_mon_features;
  * This is the threshold cache occupancy at which we will consider an
  * RMID available for re-allocation.
  */
-unsigned int intel_cqm_threshold;
+unsigned int resctrl_cqm_threshold;
 
 static inline struct rmid_entry *__rmid_entry(u32 rmid)
 {
@@ -107,7 +107,7 @@ static bool rmid_dirty(struct rmid_entry *entry)
 {
 	u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
 
-	return val >= intel_cqm_threshold;
+	return val >= resctrl_cqm_threshold;
 }
 
 /*
@@ -187,7 +187,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
 	list_for_each_entry(d, &r->domains, list) {
 		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
 			val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
-			if (val <= intel_cqm_threshold)
+			if (val <= resctrl_cqm_threshold)
 				continue;
 		}
 
@@ -625,6 +625,7 @@ static void l3_mon_evt_init(struct rdt_resource *r)
 
 int rdt_get_mon_l3_config(struct rdt_resource *r)
 {
+	unsigned int cl_size = boot_cpu_data.x86_cache_size;
 	int ret;
 
 	r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
@@ -637,10 +638,10 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
 	 *
 	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
 	 */
-	intel_cqm_threshold = boot_cpu_data.x86_cache_size * 1024 / r->num_rmid;
+	resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;
 
 	/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
-	intel_cqm_threshold /= r->mon_scale;
+	resctrl_cqm_threshold /= r->mon_scale;
 
 	ret = dom_data_init(r);
 	if (ret)
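
For a quick sanity check of the "35MB LLC and 56 RMIDs" figure quoted in the comment above, here is a standalone sketch (illustration only, not part of the patch; the cache size and RMID count are just the example values from the comment, and x86_cache_size is taken to be in KB):

#include <stdio.h>

int main(void)
{
	unsigned int cl_size   = 35 * 1024;                   /* example LLC size in KB (35 MB) */
	unsigned int num_rmid  = 56;                          /* example number of RMIDs */
	unsigned int threshold = cl_size * 1024 / num_rmid;   /* same formula as above, in bytes */

	/* threshold = 655360 bytes, i.e. 640 KB, roughly 1.8% of the 35 MB LLC */
	printf("threshold = %u bytes (%.1f%% of the LLC)\n",
	       threshold, threshold * 100.0 / (cl_size * 1024.0));
	return 0;
}

The subsequent division by r->mon_scale only converts this byte value into the hardware's occupancy-counter units.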
10 changes: 5 additions & 5 deletions arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -298,7 +298,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
 }
 
 /*
- * This is safe against intel_rdt_sched_in() called from __switch_to()
+ * This is safe against resctrl_sched_in() called from __switch_to()
  * because __switch_to() is executed with interrupts disabled. A local call
  * from update_closid_rmid() is proteced against __switch_to() because
  * preemption is disabled.
@@ -317,7 +317,7 @@ static void update_cpu_closid_rmid(void *info)
 	 * executing task might have its own closid selected. Just reuse
 	 * the context switch code.
 	 */
-	intel_rdt_sched_in();
+	resctrl_sched_in();
 }
 
 /*
@@ -542,7 +542,7 @@ static void move_myself(struct callback_head *head)
 
 	preempt_disable();
 	/* update PQR_ASSOC MSR to make resource group go into effect */
-	intel_rdt_sched_in();
+	resctrl_sched_in();
 	preempt_enable();
 
 	kfree(callback);
@@ -926,7 +926,7 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
 {
 	struct rdt_resource *r = of->kn->parent->priv;
 
-	seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale);
+	seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale);
 
 	return 0;
 }
@@ -945,7 +945,7 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
 	if (bytes > (boot_cpu_data.x86_cache_size * 1024))
 		return -EINVAL;
 
-	intel_cqm_threshold = bytes / r->mon_scale;
+	resctrl_cqm_threshold = bytes / r->mon_scale;
 
 	return nbytes;
 }
2 changes: 1 addition & 1 deletion arch/x86/kernel/process_32.c
@@ -302,7 +302,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	this_cpu_write(current_task, next_p);
 
 	/* Load the Intel cache allocation PQR MSR. */
-	intel_rdt_sched_in();
+	resctrl_sched_in();
 
 	return prev_p;
 }
2 changes: 1 addition & 1 deletion arch/x86/kernel/process_64.c
@@ -664,7 +664,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	}
 
 	/* Load the Intel cache allocation PQR MSR. */
-	intel_rdt_sched_in();
+	resctrl_sched_in();
 
 	return prev_p;
 }
