percpu: relegate chunks unusable when failing small allocations
In certain cases, requestors of percpu memory may want specific
alignments. However, it is possible to end up in situations where the
contig_hint matches, but the alignment does not. This causes excess
scanning of chunks that will fail. To prevent this, if a small
allocation fails (< 32B), the chunk is moved to the empty list. Once an
allocation is freed from that chunk, it is placed back into rotation.

Signed-off-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: Peng Fan <peng.fan@nxp.com>
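
To make the failure mode concrete: contig_hint only tracks the size of the largest free run, not where it starts, so a run can be large enough and still unable to hold an aligned request. Below is a minimal user-space sketch of that check; aligned_fit() and its parameters are illustrative stand-ins, not the allocator's real helpers (the actual scan lives in pcpu_find_block_fit()).

/*
 * Illustrative sketch only: shows why a large-enough free run can still
 * fail an aligned request.  Not kernel code.
 */
#include <stdio.h>

/* Round off up to the next multiple of align (align is a power of two). */
static unsigned int align_up(unsigned int off, unsigned int align)
{
	return (off + align - 1) & ~(align - 1);
}

/*
 * A free run of hint bytes starting at start can hold size bytes at
 * alignment align only if the aligned offset still leaves enough room.
 * Returns the offset, or -1 on failure.
 */
static int aligned_fit(unsigned int start, unsigned int hint,
		       unsigned int size, unsigned int align)
{
	unsigned int off = align_up(start, align);

	if (off + size > start + hint)
		return -1;
	return (int)off;
}

int main(void)
{
	/*
	 * A 40-byte run at offset 12: the hint (40) covers a 32-byte
	 * request, but aligning 12 up to 32 leaves only 20 usable bytes.
	 */
	printf("fit at offset 12: %d\n", aligned_fit(12, 40, 32, 32)); /* -1 */
	printf("fit at offset 0:  %d\n", aligned_fit(0, 40, 32, 32));  /* 0 */
	return 0;
}

Before this patch, every such chunk was rescanned on each allocation even though it could never satisfy the request; sidelining it (to slot 0 here) takes it out of the scan until a free relocates it.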
Dennis Zhou committed Mar 13, 2019
1 parent 3e54097 commit 8744d85
Showing 1 changed file with 26 additions and 9 deletions.

mm/percpu.c
@@ -94,6 +94,8 @@
 
 /* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
 #define PCPU_SLOT_BASE_SHIFT		5
+/* chunks in slots below this are subject to being sidelined on failed alloc */
+#define PCPU_SLOT_FAIL_THRESHOLD	3
 
 #define PCPU_EMPTY_POP_PAGES_LOW	2
 #define PCPU_EMPTY_POP_PAGES_HIGH	4
@@ -488,6 +490,22 @@ static void pcpu_mem_free(void *ptr)
 	kvfree(ptr);
 }
 
+static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
+			      bool move_front)
+{
+	if (chunk != pcpu_reserved_chunk) {
+		if (move_front)
+			list_move(&chunk->list, &pcpu_slot[slot]);
+		else
+			list_move_tail(&chunk->list, &pcpu_slot[slot]);
+	}
+}
+
+static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
+{
+	__pcpu_chunk_move(chunk, slot, true);
+}
+
 /**
  * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
  * @chunk: chunk of interest
@@ -505,12 +523,8 @@ static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
 {
 	int nslot = pcpu_chunk_slot(chunk);
 
-	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
-		if (oslot < nslot)
-			list_move(&chunk->list, &pcpu_slot[nslot]);
-		else
-			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
-	}
+	if (oslot != nslot)
+		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
 }
 
 /**
@@ -1395,7 +1409,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
 	bool do_warn = !(gfp & __GFP_NOWARN);
 	static int warn_limit = 10;
-	struct pcpu_chunk *chunk;
+	struct pcpu_chunk *chunk, *next;
 	const char *err;
 	int slot, off, cpu, ret;
 	unsigned long flags;
@@ -1457,11 +1471,14 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 restart:
 	/* search through normal chunks */
 	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
-		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
+		list_for_each_entry_safe(chunk, next, &pcpu_slot[slot], list) {
 			off = pcpu_find_block_fit(chunk, bits, bit_align,
 						  is_atomic);
-			if (off < 0)
+			if (off < 0) {
+				if (slot < PCPU_SLOT_FAIL_THRESHOLD)
+					pcpu_chunk_move(chunk, 0);
 				continue;
+			}
 
 			off = pcpu_alloc_area(chunk, bits, bit_align, off);
 			if (off >= 0)
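
Two details of the hunk above are worth illustrating: the walk becomes list_for_each_entry_safe() because pcpu_chunk_move() unlinks the current chunk from the very slot list being iterated, and chunks that fail in slots below PCPU_SLOT_FAIL_THRESHOLD are parked on slot 0. The stand-alone sketch below mimics that pattern with a hand-rolled list; the names and structures are illustrative stand-ins for <linux/list.h> and the percpu internals, not the kernel's own code.

/*
 * Illustrative sketch only: why the scan needs a _safe iterator once
 * failing chunks are moved off the list mid-walk.
 */
#include <stdio.h>
#include <stddef.h>

struct node {
	struct node *prev, *next;
};

struct chunk {
	struct node list;
	int id;
	int fits;	/* stand-in for pcpu_find_block_fit() succeeding */
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_add_tail(struct node *n, struct node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

#define chunk_of(n) ((struct chunk *)((char *)(n) - offsetof(struct chunk, list)))

int main(void)
{
	struct node slot1, slot0;	/* scanned list and sideline list */
	struct chunk a = { .id = 1, .fits = 0 };
	struct chunk b = { .id = 2, .fits = 1 };
	struct node *pos, *next;

	list_init(&slot1);
	list_init(&slot0);
	list_add_tail(&a.list, &slot1);
	list_add_tail(&b.list, &slot1);

	/*
	 * Safe walk: cache next before the body runs, because moving
	 * pos onto slot0 would otherwise invalidate pos->next.
	 */
	for (pos = slot1.next, next = pos->next; pos != &slot1;
	     pos = next, next = pos->next) {
		struct chunk *c = chunk_of(pos);

		if (!c->fits) {
			list_del(pos);			/* sideline ... */
			list_add_tail(pos, &slot0);	/* ... to slot 0 */
			printf("chunk %d sidelined\n", c->id);
			continue;
		}
		printf("chunk %d can satisfy the allocation\n", c->id);
	}
	return 0;
}

A chunk parked this way is not lost: per the commit message, freeing an allocation from it ends up in pcpu_chunk_relocate(), which recomputes the slot from the chunk's free bytes and moves it back into rotation.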
