Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "Subsystems affected by this patch series: mm (memremap, memcg,
  slab-generic, kasan, mempolicy, pagecache, oom-kill, pagemap),
  kthread, signals, lib, epoll, and core-kernel"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  kernel/hung_task.c: make type annotations consistent
  epoll: add a selftest for epoll timeout race
  mm: always have io_remap_pfn_range() set pgprot_decrypted()
  mm, oom: keep oom_adj under or at upper limit when printing
  kthread_worker: prevent queuing delayed work from timer_fn when it is being canceled
  mm/truncate.c: make __invalidate_mapping_pages() static
  lib/crc32test: remove extra local_irq_disable/enable
  ptrace: fix task_join_group_stop() for the case when current is traced
  mm: mempolicy: fix potential pte_unmap_unlock pte error
  kasan: adopt KUNIT tests to SW_TAGS mode
  mm: memcg: link page counters to root if use_hierarchy is false
  mm: memcontrol: correct the NR_ANON_THPS counter of hierarchical memcg
  hugetlb_cgroup: fix reservation accounting
  mm/mremap_pages: fix static key devmap_managed_key updates
Linus Torvalds committed Nov 2, 2020
2 parents 495023e + 3b70ae4 commit b7cbaf5
Showing 14 changed files with 275 additions and 105 deletions.
2 changes: 2 additions & 0 deletions fs/proc/base.c
@@ -1049,6 +1049,8 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
 	oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) /
 		  OOM_SCORE_ADJ_MAX;
 	put_task_struct(task);
+	if (oom_adj > OOM_ADJUST_MAX)
+		oom_adj = OOM_ADJUST_MAX;
 	len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj);
 	return simple_read_from_buffer(buf, count, ppos, buffer, len);
 }
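
Why the clamp matters: reads of /proc/<pid>/oom_adj scale the stored oom_score_adj back to the legacy range, and the scaling can overshoot. A worked sketch of the arithmetic, assuming the uapi constants OOM_DISABLE == -17, OOM_ADJUST_MAX == 15 and OOM_SCORE_ADJ_MAX == 1000:

	/* A task whose oom_score_adj was set just below the maximum: */
	int oom_score_adj = 999;
	int oom_adj = (oom_score_adj * 17) / 1000;	/* == 16 */
	/* 16 is outside the legacy -17..15 range, so the read now
	 * clamps the result to OOM_ADJUST_MAX (15). */
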
9 changes: 9 additions & 0 deletions include/linux/mm.h
@@ -2759,6 +2759,15 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
 	return VM_FAULT_NOPAGE;
 }
 
+#ifndef io_remap_pfn_range
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+				     unsigned long addr, unsigned long pfn,
+				     unsigned long size, pgprot_t prot)
+{
+	return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
+}
+#endif
+
 static inline vm_fault_t vmf_error(int err)
 {
 	if (err == -ENOMEM)
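
io_remap_pfn_range() is what drivers call from their mmap handlers to map MMIO into user space; routing the default definition through pgprot_decrypted() keeps such mappings unencrypted under SME/SEV instead of relying on each caller to remember it. A minimal sketch of a typical call site, assuming a hypothetical driver whose mydev_mmio_base holds the physical base of its register window (neither is part of this commit):

	/* Hypothetical driver mmap handler. */
	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long pfn = mydev_mmio_base >> PAGE_SHIFT;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		/* With this change, the decrypted attribute is applied
		 * inside io_remap_pfn_range() itself. */
		return io_remap_pfn_range(vma, vma->vm_start, pfn,
					  vma->vm_end - vma->vm_start,
					  vma->vm_page_prot);
	}
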
4 changes: 0 additions & 4 deletions include/linux/pgtable.h
@@ -1427,10 +1427,6 @@ typedef unsigned int pgtbl_mod_mask;
 
 #endif /* !__ASSEMBLY__ */
 
-#ifndef io_remap_pfn_range
-#define io_remap_pfn_range remap_pfn_range
-#endif
-
 #ifndef has_transparent_hugepage
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define has_transparent_hugepage() 1

3 changes: 1 addition & 2 deletions kernel/hung_task.c
@@ -225,8 +225,7 @@ static long hung_timeout_jiffies(unsigned long last_checked,
  * Process updating of timeout sysctl
  */
 int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
-				  void __user *buffer,
-				  size_t *lenp, loff_t *ppos)
+				  void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
 
3 changes: 2 additions & 1 deletion kernel/kthread.c
@@ -897,7 +897,8 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 	/* Move the work from worker->delayed_work_list. */
 	WARN_ON_ONCE(list_empty(&work->node));
 	list_del_init(&work->node);
-	kthread_insert_work(worker, work, &worker->work_list);
+	if (!work->canceling)
+		kthread_insert_work(worker, work, &worker->work_list);
 
 	raw_spin_unlock_irqrestore(&worker->lock, flags);
 }
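
The race being closed: canceling a delayed work can briefly drop worker->lock while it waits, so the timer callback may run mid-cancel; with the check above it no longer re-queues a work item whose canceling count is raised. A minimal sketch of the API pair involved, with my_work_fn and the one-second delay as illustrative assumptions:

	static void my_work_fn(struct kthread_work *work)
	{
		/* runs in the kthread worker's context */
	}

	static DEFINE_KTHREAD_DELAYED_WORK(my_dwork, my_work_fn);

	static void producer(struct kthread_worker *worker)
	{
		/* Arms a timer that fires kthread_delayed_work_timer_fn(). */
		kthread_queue_delayed_work(worker, &my_dwork, HZ);
	}

	static void teardown(void)
	{
		/* A timer firing while this cancel is in flight can no
		 * longer re-queue the work behind our back. */
		kthread_cancel_delayed_work_sync(&my_dwork);
	}
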
19 changes: 10 additions & 9 deletions kernel/signal.c
@@ -391,16 +391,17 @@ static bool task_participate_group_stop(struct task_struct *task)
 
 void task_join_group_stop(struct task_struct *task)
 {
+	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
+	struct signal_struct *sig = current->signal;
+
+	if (sig->group_stop_count) {
+		sig->group_stop_count++;
+		mask |= JOBCTL_STOP_CONSUME;
+	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
+		return;
+
 	/* Have the new thread join an on-going signal group stop */
-	unsigned long jobctl = current->jobctl;
-	if (jobctl & JOBCTL_STOP_PENDING) {
-		struct signal_struct *sig = current->signal;
-		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
-		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
-		if (task_set_jobctl_pending(task, signr | gstop)) {
-			sig->group_stop_count++;
-		}
-	}
+	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
 }
 
 /*

4 changes: 0 additions & 4 deletions lib/crc32test.c
@@ -683,7 +683,6 @@ static int __init crc32c_test(void)
 
 	/* reduce OS noise */
 	local_irq_save(flags);
-	local_irq_disable();
 
 	nsec = ktime_get_ns();
 	for (i = 0; i < 100; i++) {
@@ -694,7 +693,6 @@ static int __init crc32c_test(void)
 	nsec = ktime_get_ns() - nsec;
 
 	local_irq_restore(flags);
-	local_irq_enable();
 
 	pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
 
@@ -768,7 +766,6 @@ static int __init crc32_test(void)
 
 	/* reduce OS noise */
 	local_irq_save(flags);
-	local_irq_disable();
 
 	nsec = ktime_get_ns();
 	for (i = 0; i < 100; i++) {
@@ -783,7 +780,6 @@ static int __init crc32_test(void)
 	nsec = ktime_get_ns() - nsec;
 
 	local_irq_restore(flags);
-	local_irq_enable();
 
 	pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
 		CRC_LE_BITS, CRC_BE_BITS);
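
For reference, local_irq_save() already disables local interrupts in addition to saving the previous state, which is why the extra calls were redundant; worse, an unconditional local_irq_enable() after local_irq_restore() could turn interrupts back on even when they were disabled before the save. A minimal sketch of the corrected idiom (illustrative only):

	static void timed_section(void)
	{
		unsigned long flags;

		/* Saves the current IRQ state and disables interrupts;
		 * no separate local_irq_disable() is needed. */
		local_irq_save(flags);

		/* ... benchmark body ... */

		/* Restores exactly the saved state; adding
		 * local_irq_enable() here could wrongly enable IRQs
		 * that were off before the save. */
		local_irq_restore(flags);
	}
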
149 changes: 107 additions & 42 deletions lib/test_kasan.c
@@ -216,6 +216,12 @@ static void kmalloc_oob_16(struct kunit *test)
 		u64 words[2];
 	} *ptr1, *ptr2;
 
+	/* This test is specifically crafted for the generic mode. */
+	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+		kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
+		return;
+	}
+
 	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
 
@@ -227,6 +233,23 @@ static void kmalloc_oob_16(struct kunit *test)
 	kfree(ptr2);
 }
 
+static void kmalloc_uaf_16(struct kunit *test)
+{
+	struct {
+		u64 words[2];
+	} *ptr1, *ptr2;
+
+	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
+
+	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
+	kfree(ptr2);
+
+	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
+	kfree(ptr1);
+}
+
 static void kmalloc_oob_memset_2(struct kunit *test)
 {
 	char *ptr;
@@ -429,6 +452,12 @@ static void kasan_global_oob(struct kunit *test)
 	volatile int i = 3;
 	char *p = &global_array[ARRAY_SIZE(global_array) + i];
 
+	/* Only generic mode instruments globals. */
+	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+		kunit_info(test, "CONFIG_KASAN_GENERIC required");
+		return;
+	}
+
 	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
 }
 
@@ -467,6 +496,12 @@ static void kasan_alloca_oob_left(struct kunit *test)
 	char alloca_array[i];
 	char *p = alloca_array - 1;
 
+	/* Only generic mode instruments dynamic allocas. */
+	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+		kunit_info(test, "CONFIG_KASAN_GENERIC required");
+		return;
+	}
+
 	if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
 		kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
 		return;
@@ -481,6 +516,12 @@ static void kasan_alloca_oob_right(struct kunit *test)
 	char alloca_array[i];
 	char *p = alloca_array + i;
 
+	/* Only generic mode instruments dynamic allocas. */
+	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+		kunit_info(test, "CONFIG_KASAN_GENERIC required");
+		return;
+	}
+
 	if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
 		kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
 		return;
@@ -551,6 +592,9 @@ static void kasan_memchr(struct kunit *test)
 		return;
 	}
 
+	if (OOB_TAG_OFF)
+		size = round_up(size, OOB_TAG_OFF);
+
 	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 
@@ -573,6 +617,9 @@ static void kasan_memcmp(struct kunit *test)
 		return;
 	}
 
+	if (OOB_TAG_OFF)
+		size = round_up(size, OOB_TAG_OFF);
+
 	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
 	memset(arr, 0, sizeof(arr));
@@ -619,69 +666,85 @@ static void kasan_strings(struct kunit *test)
 	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
 }
 
-static void kasan_bitops(struct kunit *test)
+static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
+{
+	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
+	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
+	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
+	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
+	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
+	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
+	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
+	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
+}
+
+static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
+{
+	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
+	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
+	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
+	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
+	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
+	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
+	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
+	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
+
+#if defined(clear_bit_unlock_is_negative_byte)
+	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
+				clear_bit_unlock_is_negative_byte(nr, addr));
+#endif
+}
+
+static void kasan_bitops_generic(struct kunit *test)
 {
+	long *bits;
+
+	/* This test is specifically crafted for the generic mode. */
+	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+		kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
+		return;
+	}
+
 	/*
 	 * Allocate 1 more byte, which causes kzalloc to round up to 16-bytes;
 	 * this way we do not actually corrupt other memory.
 	 */
-	long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
+	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
 
 	/*
 	 * Below calls try to access bit within allocated memory; however, the
 	 * below accesses are still out-of-bounds, since bitops are defined to
 	 * operate on the whole long the bit is in.
 	 */
-	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(BITS_PER_LONG, bits));
-
-	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(BITS_PER_LONG, bits));
-
-	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(BITS_PER_LONG, bits));
-
-	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(BITS_PER_LONG, bits));
-
-	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(BITS_PER_LONG, bits));
-
-	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(BITS_PER_LONG, bits));
-
-	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(BITS_PER_LONG, bits));
-
-	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(BITS_PER_LONG, bits));
+	kasan_bitops_modify(test, BITS_PER_LONG, bits);
 
 	/*
 	 * Below calls try to access bit beyond allocated memory.
 	 */
-	KUNIT_EXPECT_KASAN_FAIL(test,
-		test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
-
-	KUNIT_EXPECT_KASAN_FAIL(test,
-		__test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
-
-	KUNIT_EXPECT_KASAN_FAIL(test,
-		test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits));
+	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
 
-	KUNIT_EXPECT_KASAN_FAIL(test,
-		test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
+	kfree(bits);
+}
 
-	KUNIT_EXPECT_KASAN_FAIL(test,
-		__test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
+static void kasan_bitops_tags(struct kunit *test)
+{
+	long *bits;
 
-	KUNIT_EXPECT_KASAN_FAIL(test,
-		test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
+	/* This test is specifically crafted for the tag-based mode. */
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
+		kunit_info(test, "CONFIG_KASAN_SW_TAGS required\n");
+		return;
+	}
 
-	KUNIT_EXPECT_KASAN_FAIL(test,
-		__test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
+	/* Allocation size will be rounded up to the granule size, which is 16. */
+	bits = kzalloc(sizeof(*bits), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
 
-	KUNIT_EXPECT_KASAN_FAIL(test,
-		kasan_int_result =
-			test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits));
+	/* Do the accesses past the 16 allocated bytes. */
+	kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]);
+	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]);
 
-#if defined(clear_bit_unlock_is_negative_byte)
-	KUNIT_EXPECT_KASAN_FAIL(test,
-		kasan_int_result = clear_bit_unlock_is_negative_byte(
-			BITS_PER_LONG + BITS_PER_BYTE, bits));
-#endif
+	kfree(bits);
 }

@@ -728,6 +791,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(kmalloc_oob_krealloc_more),
 	KUNIT_CASE(kmalloc_oob_krealloc_less),
 	KUNIT_CASE(kmalloc_oob_16),
+	KUNIT_CASE(kmalloc_uaf_16),
 	KUNIT_CASE(kmalloc_oob_in_memset),
 	KUNIT_CASE(kmalloc_oob_memset_2),
 	KUNIT_CASE(kmalloc_oob_memset_4),
@@ -751,7 +815,8 @@ static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(kasan_memchr),
 	KUNIT_CASE(kasan_memcmp),
 	KUNIT_CASE(kasan_strings),
-	KUNIT_CASE(kasan_bitops),
+	KUNIT_CASE(kasan_bitops_generic),
+	KUNIT_CASE(kasan_bitops_tags),
 	KUNIT_CASE(kmalloc_double_kzfree),
 	KUNIT_CASE(vmalloc_oob),
 	{}
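
For orientation, these cases run under KUnit; the suite is registered from the same file in the usual way. A sketch of the registration boilerplate, assuming the kasan_test_init/kasan_test_exit hooks that test_kasan.c carries around this series:

	static struct kunit_suite kasan_kunit_test_suite = {
		.name = "kasan",
		.init = kasan_test_init,
		.exit = kasan_test_exit,
		.test_cases = kasan_kunit_test_cases,
	};

	kunit_test_suite(kasan_kunit_test_suite);

With the KASAN KUnit test config enabled, the suite runs at boot or module load, and each case above reports pass or fail through the KUnit framework regardless of which KASAN mode skipped it internally.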