net: implement per-cpu reserves for memory_allocated
We plan to keep sk->sk_forward_alloc as small as possible
in future patches.

This means we are going to call sk_memory_allocated_add()
and sk_memory_allocated_sub() more often.

Implement a per-cpu cache of +1/-1 MB to reduce the number
of updates to sk->sk_prot->memory_allocated, which would
otherwise be a source of false sharing.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Eric Dumazet authored and Jakub Kicinski committed Jun 10, 2022
1 parent 0defbb0 commit 3cd3399
Showing 1 changed file with 29 additions and 9 deletions.

include/net/sock.h (29 additions, 9 deletions)
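
The commit message describes a batching pattern: each CPU accumulates small forward-alloc adjustments in a private counter and folds them into the shared atomic only once they exceed +/-1 MB. Below is a minimal userspace sketch of that idea, not the kernel code: it assumes 4 KB pages, emulates per-CPU storage with a thread-local variable, and omits preempt_disable()/preempt_enable(), which have no direct userspace equivalent. The names MEMORY_PCPU_RESERVE, memory_allocated_add() and memory_allocated_sub() are invented for the illustration.

/* Userspace sketch (not kernel code) of the per-cpu reserve pattern.
 * Build with: cc -std=c11 sketch.c
 */
#include <stdatomic.h>
#include <stdio.h>

#define PAGE_SHIFT 12					/* assumed: 4 KB pages */
#define MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))	/* 1 MB = 256 pages */

static atomic_long memory_allocated;	 /* shared counter, the false-sharing hazard */
static _Thread_local long local_reserve; /* stand-in for the per-CPU cache */

static void memory_allocated_add(long amt)
{
	local_reserve += amt;
	if (local_reserve >= MEMORY_PCPU_RESERVE) {
		/* Flush the whole batch with a single atomic operation. */
		atomic_fetch_add(&memory_allocated, local_reserve);
		local_reserve = 0;
	}
}

static void memory_allocated_sub(long amt)
{
	local_reserve -= amt;
	if (local_reserve <= -MEMORY_PCPU_RESERVE) {
		/* local_reserve is negative here, so this shrinks the total. */
		atomic_fetch_add(&memory_allocated, local_reserve);
		local_reserve = 0;
	}
}

int main(void)
{
	/* 300 one-page charges: the shared counter is touched exactly once,
	 * when the local reserve crosses the 256-page threshold.
	 */
	for (int i = 0; i < 300; i++)
		memory_allocated_add(1);
	printf("shared=%ld local=%ld\n",
	       (long)atomic_load(&memory_allocated), local_reserve);
	return 0;
}

The kernel version drains the flushed amount back out of the per-CPU counter with __this_cpu_sub() rather than resetting it, but the effect is the same: the cross-CPU cache-line traffic on memory_allocated happens once per megabyte instead of once per call.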
@@ -1397,22 +1397,48 @@ static inline bool sk_under_memory_pressure(const struct sock *sk)
 	return !!*sk->sk_prot->memory_pressure;
 }
 
+static inline long
+proto_memory_allocated(const struct proto *prot)
+{
+	return max(0L, atomic_long_read(prot->memory_allocated));
+}
+
 static inline long
 sk_memory_allocated(const struct sock *sk)
 {
-	return atomic_long_read(sk->sk_prot->memory_allocated);
+	return proto_memory_allocated(sk->sk_prot);
 }
 
+/* 1 MB per cpu, in page units */
+#define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
+
 static inline long
 sk_memory_allocated_add(struct sock *sk, int amt)
 {
-	return atomic_long_add_return(amt, sk->sk_prot->memory_allocated);
+	int local_reserve;
+
+	preempt_disable();
+	local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
+	if (local_reserve >= SK_MEMORY_PCPU_RESERVE) {
+		__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
+		atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
+	}
+	preempt_enable();
+	return sk_memory_allocated(sk);
 }
 
 static inline void
 sk_memory_allocated_sub(struct sock *sk, int amt)
 {
-	atomic_long_sub(amt, sk->sk_prot->memory_allocated);
+	int local_reserve;
+
+	preempt_disable();
+	local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
+	if (local_reserve <= -SK_MEMORY_PCPU_RESERVE) {
+		__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
+		atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
+	}
+	preempt_enable();
 }
 
 #define SK_ALLOC_PERCPU_COUNTER_BATCH 16
@@ -1441,12 +1467,6 @@ proto_sockets_allocated_sum_positive(struct proto *prot)
 	return percpu_counter_sum_positive(prot->sockets_allocated);
 }
 
-static inline long
-proto_memory_allocated(struct proto *prot)
-{
-	return atomic_long_read(prot->memory_allocated);
-}
-
 static inline bool
 proto_memory_pressure(struct proto *prot)
 {
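
One consequence of the reserves shows up in the relocated proto_memory_allocated(): with up to +/-1 MB per CPU pending in the caches, the shared counter is now approximate and can transiently dip below zero, which is presumably why it now clamps with max(0L, ...). The constant's arithmetic, worked through under an assumed PAGE_SHIFT, is summarized in this comment-only sketch (not kernel code):

/* SK_MEMORY_PCPU_RESERVE = 1 << (20 - PAGE_SHIFT)
 *   with PAGE_SHIFT = 12: 1 << 8 = 256 pages; 256 * 4096 B  = 1 MB
 *   with PAGE_SHIFT = 16: 1 << 4 = 16 pages;  16 * 65536 B  = 1 MB
 * The per-CPU reserve is always 1 MB, expressed in page units, so the
 * global counter can lag the true total by at most
 * (number of CPUs) * SK_MEMORY_PCPU_RESERVE pages in either direction.
 */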
