Merge tag 'x86_splitlock_for_6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 splitlock updates from Dave Hansen:
 "Add a sysctl to control the split lock misery mode.

  This enables users to reduce the penalty inflicted on split lock
  users. There are some proprietary, binary-only games which became
  entirely unplayable with the old penalty.

  Anyone opting into the new mode is, of course, more exposed to the DoS
  nastiness inherent with split locks, but they can play their games
  again"

* tag 'x86_splitlock_for_6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/split_lock: Add sysctl to control the misery mode
Linus Torvalds committed Dec 12, 2022
2 parents 287f037 + 7272093 commit 1cab145
Showing 2 changed files with 76 additions and 10 deletions.
23 changes: 23 additions & 0 deletions Documentation/admin-guide/sysctl/kernel.rst
@@ -1314,6 +1314,29 @@ watchdog work to be queued by the watchdog timer function, otherwise the NMI
watchdog — if enabled — can detect a hard lockup condition.


split_lock_mitigate (x86 only)
==============================

On x86, each "split lock" imposes a system-wide performance penalty. On larger
systems, large numbers of split locks from unprivileged users can result in
denials of service to well-behaved and potentially more important users.
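
For context, a minimal userspace sketch (illustrative only, not part of this
patch) of the kind of access the text is talking about: a LOCK-prefixed atomic
read-modify-write whose operand straddles two cache lines.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* 128 bytes, aligned to a 64-byte cache-line boundary. */
	char *buf = aligned_alloc(64, 128);

	if (!buf)
		return 1;

	/*
	 * A 4-byte counter at offset 62 spans bytes 62..65 and therefore
	 * crosses the boundary between the first and second cache line.
	 */
	uint32_t *split = (uint32_t *)(buf + 62);

	*split = 0;

	/*
	 * The locked increment on the misaligned operand is the "split
	 * lock"; with split lock detection armed it raises #AC and lands
	 * the task in the handling path that the patch below modifies.
	 */
	__atomic_fetch_add(split, 1, __ATOMIC_SEQ_CST);

	printf("%u\n", *split);
	free(buf);
	return 0;
}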

The kernel mitigates these bad users by detecting split locks and imposing
penalties: forcing them to wait and only allowing one core to execute split
locks at a time.

These mitigations can make those bad applications unbearably slow. Setting
split_lock_mitigate=0 may restore some application performance, but will also
increase system exposure to denial of service attacks from split lock users.

= ===================================================================
0 Disable the mitigation mode - just warns about the split lock in the
  kernel log and exposes the system to denials of service from the
  split lockers.
1 Enable the mitigation mode (this is the default) - penalizes the split
  lockers with intentional performance degradation.
= ===================================================================
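
The knob is registered under the "kernel" sysctl table (see the intel.c change
below), so it shows up as /proc/sys/kernel/split_lock_mitigate and as
kernel.split_lock_mitigate to sysctl(8). A small userspace sketch, illustrative
only and not part of this patch, for reading the current mode and switching to
warn-only mode:

#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/kernel/split_lock_mitigate";
	FILE *f;
	int val;

	/* Read the current mode: 1 = mitigation on (default), 0 = warn only. */
	f = fopen(path, "r");
	if (!f || fscanf(f, "%d", &val) != 1) {
		perror(path);
		return 1;
	}
	fclose(f);
	printf("split_lock_mitigate = %d\n", val);

	/* Writing 0 needs privilege; equivalent to "sysctl kernel.split_lock_mitigate=0". */
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "0\n");
	fclose(f);
	return 0;
}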


stack_erasing
=============

63 changes: 53 additions & 10 deletions arch/x86/kernel/cpu/intel.c
@@ -1034,8 +1034,32 @@ static const struct {

static struct ratelimit_state bld_ratelimit;

static unsigned int sysctl_sld_mitigate = 1;
static DEFINE_SEMAPHORE(buslock_sem);

#ifdef CONFIG_PROC_SYSCTL
static struct ctl_table sld_sysctls[] = {
	{
		.procname	= "split_lock_mitigate",
		.data		= &sysctl_sld_mitigate,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};

static int __init sld_mitigate_sysctl_init(void)
{
	register_sysctl_init("kernel", sld_sysctls);
	return 0;
}

late_initcall(sld_mitigate_sysctl_init);
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt), ratelimit;
@@ -1146,12 +1170,20 @@ static void split_lock_init(void)
	split_lock_verify_msr(sld_state != sld_off);
}

static void __split_lock_reenable(struct work_struct *work)
static void __split_lock_reenable_unlock(struct work_struct *work)
{
	sld_update_msr(true);
	up(&buslock_sem);
}

static DECLARE_DELAYED_WORK(sl_reenable_unlock, __split_lock_reenable_unlock);

static void __split_lock_reenable(struct work_struct *work)
{
	sld_update_msr(true);
}
static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);

/*
* If a CPU goes offline with pending delayed work to re-enable split lock
* detection then the delayed work will be executed on some other CPU. That
@@ -1169,25 +1201,36 @@ static int splitlock_cpu_offline(unsigned int cpu)
	return 0;
}

static DECLARE_DELAYED_WORK(split_lock_reenable, __split_lock_reenable);

static void split_lock_warn(unsigned long ip)
{
	struct delayed_work *work;
	int cpu;

	if (!current->reported_split_lock)
		pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
				    current->comm, current->pid, ip);
	current->reported_split_lock = 1;

	/* misery factor #1, sleep 10ms before trying to execute split lock */
	if (msleep_interruptible(10) > 0)
		return;
	/* Misery factor #2, only allow one buslocked disabled core at a time */
	if (down_interruptible(&buslock_sem) == -EINTR)
		return;
	if (sysctl_sld_mitigate) {
		/*
		 * misery factor #1:
		 * sleep 10ms before trying to execute split lock.
		 */
		if (msleep_interruptible(10) > 0)
			return;
		/*
		 * Misery factor #2:
		 * only allow one buslocked disabled core at a time.
		 */
		if (down_interruptible(&buslock_sem) == -EINTR)
			return;
		work = &sl_reenable_unlock;
	} else {
		work = &sl_reenable;
	}

	cpu = get_cpu();
	schedule_delayed_work_on(cpu, &split_lock_reenable, 2);
	schedule_delayed_work_on(cpu, work, 2);

	/* Disable split lock detection on this CPU to make progress */
	sld_update_msr(false);