Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 369323
b: refs/heads/master
c: f9c2288
h: refs/heads/master
i:
  369321: a8be405
  369319: decd9d1
v: v3
  • Loading branch information
Patrick McHardy authored and David S. Miller committed Apr 19, 2013
1 parent 2cfef3a commit ba98688
Show file tree
Hide file tree
Showing 3 changed files with 144 additions and 4 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 5fd96123ee19b96be7d7b57fd42227e1a146ef05
refs/heads/master: f9c2288837ba072b21dba955f04a4c97eaa77b1e
2 changes: 2 additions & 0 deletions trunk/include/linux/netlink.h
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,8 @@ extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group)
extern void netlink_clear_multicast_users(struct sock *sk, unsigned int group);
extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
extern struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
u32 dst_portid, gfp_t gfp_mask);
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
__u32 group, gfp_t allocation);
Expand Down
144 changes: 141 additions & 3 deletions trunk/net/netlink/af_netlink.c
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,11 @@ static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
}

static bool netlink_rx_is_mmaped(struct sock *sk)
{
return nlk_sk(sk)->rx_ring.pg_vec != NULL;
}

static bool netlink_tx_is_mmaped(struct sock *sk)
{
return nlk_sk(sk)->tx_ring.pg_vec != NULL;
Expand Down Expand Up @@ -589,8 +594,54 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
mutex_unlock(&nlk->pg_vec_lock);
return err;
}

/* Deliver an skb whose data already lives in a frame of the receiver's
 * mmaped RX ring: fill in the frame header metadata and mark the frame
 * VALID so userspace can consume it directly. The skb head itself is
 * then freed; NETLINK_SKB_DELIVERED is set first — presumably so the
 * skb destructor knows the frame was handed over (TODO confirm against
 * the destructor).
 */
static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
{
	struct nl_mmap_hdr *hdr;

	hdr = netlink_mmap_hdr(skb);
	hdr->nm_len = skb->len;
	hdr->nm_group = NETLINK_CB(skb).dst_group;
	hdr->nm_pid = NETLINK_CB(skb).creds.pid;
	hdr->nm_uid = NETLINK_CB(skb).creds.uid;
	hdr->nm_gid = NETLINK_CB(skb).creds.gid;
	/* Flush the header to memory before publishing the new status, so
	 * userspace never observes VALID with stale metadata. */
	netlink_frame_flush_dcache(hdr);
	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);

	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
	kfree_skb(skb);
}

/* The receiver has an mmaped RX ring, but this skb's data is NOT in ring
 * memory. Reserve the next unused ring frame, queue the skb on the normal
 * receive queue, and mark the frame COPY so the data is copied out on
 * reception. If no unused frame is available the skb is dropped and the
 * socket error is set to ENOBUFS.
 */
static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	struct netlink_ring *ring = &nlk->rx_ring;
	struct nl_mmap_hdr *hdr;

	/* Frame reservation, head advance and queueing must happen atomically
	 * under the receive-queue lock so frames pair up with queued skbs. */
	spin_lock_bh(&sk->sk_receive_queue.lock);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL) {
		/* Ring overrun: drop and report ENOBUFS to the receiver. */
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		kfree_skb(skb);
		sk->sk_err = ENOBUFS;
		sk->sk_error_report(sk);
		return;
	}
	netlink_increment_head(ring);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	/* Frame is ours (taken off the UNUSED state above), so the header can
	 * be filled outside the lock before flipping it to COPY. */
	hdr->nm_len = skb->len;
	hdr->nm_group = NETLINK_CB(skb).dst_group;
	hdr->nm_pid = NETLINK_CB(skb).creds.pid;
	hdr->nm_uid = NETLINK_CB(skb).creds.uid;
	hdr->nm_gid = NETLINK_CB(skb).creds.gid;
	netlink_set_status(hdr, NL_MMAP_STATUS_COPY);
}

#else /* CONFIG_NETLINK_MMAP */
#define netlink_skb_is_mmaped(skb) false
#define netlink_rx_is_mmaped(sk) false
#define netlink_tx_is_mmaped(sk) false
#define netlink_mmap sock_no_mmap
#define netlink_poll datagram_poll
Expand Down Expand Up @@ -1381,7 +1432,14 @@ static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
{
	/* Capture the length first: the mmaped delivery paths free the skb. */
	int len = skb->len;

#ifdef CONFIG_NETLINK_MMAP
	/* Three delivery paths: data already sits in a ring frame; receiver
	 * has an RX ring so the frame is flagged for copy-out; or plain
	 * socket-queue delivery. */
	if (netlink_skb_is_mmaped(skb))
		netlink_queue_mmaped_skb(sk, skb);
	else if (netlink_rx_is_mmaped(sk))
		netlink_ring_set_copied(sk, skb);
	else
#endif /* CONFIG_NETLINK_MMAP */
		skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, len);
	return len;
}
Expand Down Expand Up @@ -1492,6 +1550,68 @@ int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
}
EXPORT_SYMBOL(netlink_unicast);

/* netlink_alloc_skb - allocate an skb for transmission to @dst_portid,
 * preferring a frame of the destination socket's mmaped RX ring.
 *
 * @ssk:        sending socket (used to look up the destination)
 * @size:       payload size to allocate
 * @dst_portid: destination port id
 * @gfp_mask:   allocation flags
 *
 * Falls back to a regular alloc_skb() when the destination has no RX
 * ring, the lookup fails, @size does not fit in a ring frame, or
 * CONFIG_NETLINK_MMAP is off. Returns NULL only when a ring delivery was
 * attempted but the skb head allocation failed or the ring is full.
 */
struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
				  u32 dst_portid, gfp_t gfp_mask)
{
#ifdef CONFIG_NETLINK_MMAP
	struct sock *sk = NULL;
	struct sk_buff *skb;
	struct netlink_ring *ring;
	struct nl_mmap_hdr *hdr;
	unsigned int maxlen;

	sk = netlink_getsockbyportid(ssk, dst_portid);
	if (IS_ERR(sk))
		goto out;

	ring = &nlk_sk(sk)->rx_ring;
	/* fast-path without atomic ops for common case: non-mmaped receiver */
	if (ring->pg_vec == NULL)
		goto out_put;

	/* Only the skb head is allocated; the data will point into a frame
	 * of the destination's ring. */
	skb = alloc_skb_head(gfp_mask);
	if (skb == NULL)
		goto err1;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	/* check again under lock */
	if (ring->pg_vec == NULL)
		goto out_free;

	/* Oversized messages cannot use the ring; fall back to alloc_skb. */
	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
	if (maxlen < size)
		goto out_free;

	netlink_forward_ring(ring);
	hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
	if (hdr == NULL)
		goto err2;
	netlink_ring_setup_skb(skb, sk, ring, hdr);
	/* RESERVED keeps userspace off the frame until delivery marks it
	 * VALID; the pending count tracks outstanding reservations. */
	netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
	atomic_inc(&ring->pending);
	netlink_increment_head(ring);

	spin_unlock_bh(&sk->sk_receive_queue.lock);
	return skb;

	/* err1/err2: ring delivery was required but failed — return NULL.
	 * out_free/out_put/out: benign conditions — fall back to alloc_skb. */
err2:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
err1:
	sock_put(sk);
	return NULL;

out_free:
	kfree_skb(skb);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
out_put:
	sock_put(sk);
out:
#endif
	return alloc_skb(size, gfp_mask);
}

int netlink_has_listeners(struct sock *sk, unsigned int group)
{
int res = 0;
Expand Down Expand Up @@ -2270,9 +2390,13 @@ static int netlink_dump(struct sock *sk)

alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);

skb = sock_rmalloc(sk, alloc_size, 0, GFP_KERNEL);
if (!netlink_rx_is_mmaped(sk) &&
atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
goto errout_skb;
skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL);
if (!skb)
goto errout_skb;
netlink_skb_set_owner_r(skb, sk);

len = cb->dump(skb, cb);

Expand Down Expand Up @@ -2327,6 +2451,19 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
if (cb == NULL)
return -ENOBUFS;

/* Memory mapped dump requests need to be copied to avoid looping
* on the pending state in netlink_mmap_sendmsg() while the CB hold
* a reference to the skb.
*/
if (netlink_skb_is_mmaped(skb)) {
skb = skb_copy(skb, GFP_KERNEL);
if (skb == NULL) {
kfree(cb);
return -ENOBUFS;
}
} else
atomic_inc(&skb->users);

cb->dump = control->dump;
cb->done = control->done;
cb->nlh = nlh;
Expand Down Expand Up @@ -2387,7 +2524,8 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
if (err)
payload += nlmsg_len(nlh);

skb = nlmsg_new(payload, GFP_KERNEL);
skb = netlink_alloc_skb(in_skb->sk, nlmsg_total_size(payload),
NETLINK_CB(in_skb).portid, GFP_KERNEL);
if (!skb) {
struct sock *sk;

Expand Down

0 comments on commit ba98688

Please sign in to comment.