Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 188875
b: refs/heads/master
c: 73852e8
h: refs/heads/master
i:
  188873: 3e20c7d
  188871: 1c7f78e
v: v3
  • Loading branch information
Steven J. Magnani authored and David S. Miller committed Mar 20, 2010
1 parent 929f299 commit 3859cba
Show file tree
Hide file tree
Showing 2 changed files with 44 additions and 21 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: f5d410f2ea7ba340f11815a56e05b9fa9421c421
refs/heads/master: 73852e8151b7d7a529fbe019ab6d2d0c02d8f3f2
63 changes: 43 additions & 20 deletions trunk/net/ipv4/tcp.c
Original file line number Diff line number Diff line change
Expand Up @@ -1254,6 +1254,39 @@ static void tcp_prequeue_process(struct sock *sk)
tp->ucopy.memory = 0;
}

#ifdef CONFIG_NET_DMA
/*
 * Drain outstanding async-DMA copies on the socket's ucopy channel and
 * release the skbs whose payload has already been copied to user space.
 *
 * @sk:   socket whose ucopy DMA state is serviced
 * @wait: if true, loop until every cookie up to the last one issued has
 *        completed (the do/while re-polls); if false, do a single pass
 *        and reap only what has finished so far
 *
 * No-op when no DMA channel is attached to the socket's ucopy state.
 */
static void tcp_service_net_dma(struct sock *sk, bool wait)
{
	dma_cookie_t done, used;
	dma_cookie_t last_issued;
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->ucopy.dma_chan)
		return;

	/* Snapshot the most recent cookie, then kick the channel so all
	 * queued descriptors actually start executing before we poll. */
	last_issued = tp->ucopy.dma_cookie;
	dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

	do {
		if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
					      last_issued, &done,
					      &used) == DMA_SUCCESS) {
			/* Safe to free early-copied skbs now */
			__skb_queue_purge(&sk->sk_async_wait_queue);
			break;
		} else {
			/* Not everything is done: reap only the head skbs
			 * whose individual cookies fall in the completed
			 * range reported by done/used. */
			struct sk_buff *skb;
			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
			       (dma_async_is_complete(skb->dma_cookie, done,
						      used) == DMA_SUCCESS)) {
				__skb_dequeue(&sk->sk_async_wait_queue);
				kfree_skb(skb);
			}
		}
	} while (wait);
}
#endif

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
struct sk_buff *skb;
Expand Down Expand Up @@ -1546,6 +1579,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
/* __ Set realtime policy in scheduler __ */
}

#ifdef CONFIG_NET_DMA
if (tp->ucopy.dma_chan)
dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
#endif
if (copied >= target) {
/* Do not sleep, just process backlog. */
release_sock(sk);
Expand All @@ -1554,6 +1591,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
sk_wait_data(sk, &timeo);

#ifdef CONFIG_NET_DMA
tcp_service_net_dma(sk, false); /* Don't block */
tp->ucopy.wakeup = 0;
#endif

Expand Down Expand Up @@ -1633,6 +1671,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
copied = -EFAULT;
break;
}

dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

if ((offset + used) == skb->len)
copied_early = 1;

Expand Down Expand Up @@ -1702,27 +1743,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
}

#ifdef CONFIG_NET_DMA
if (tp->ucopy.dma_chan) {
dma_cookie_t done, used;

dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
tp->ucopy.dma_cookie, &done,
&used) == DMA_IN_PROGRESS) {
/* do partial cleanup of sk_async_wait_queue */
while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
(dma_async_is_complete(skb->dma_cookie, done,
used) == DMA_SUCCESS)) {
__skb_dequeue(&sk->sk_async_wait_queue);
kfree_skb(skb);
}
}
tcp_service_net_dma(sk, true); /* Wait for queue to drain */
tp->ucopy.dma_chan = NULL;

/* Safe to free early-copied skbs now */
__skb_queue_purge(&sk->sk_async_wait_queue);
tp->ucopy.dma_chan = NULL;
}
if (tp->ucopy.pinned_list) {
dma_unpin_iovec_pages(tp->ucopy.pinned_list);
tp->ucopy.pinned_list = NULL;
Expand Down

0 comments on commit 3859cba

Please sign in to comment.