Commit 43fe091

---
r: 66943
b: refs/heads/master
c: f0703c8
h: refs/heads/master
i:
  66941: 40a8f2f
  66939: cda67da
  66935: 34f69b2
  66927: 58f0d1a
  66911: f36fe6a
  66879: 2566954
  66815: df0a3b0
v: v3
Ursula Braun authored and David S. Miller committed Oct 10, 2007
1 parent aa4ec44 commit 43fe091
Showing 3 changed files with 133 additions and 87 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 57f20448032158ad00b1e74f479515c689998be9
+refs/heads/master: f0703c80e5156406ad947cb67fe277725b48080f
7 changes: 7 additions & 0 deletions trunk/include/net/iucv/af_iucv.h
@@ -50,6 +50,12 @@ struct sockaddr_iucv {
 
 
 /* Common socket structures and functions */
+struct sock_msg_q {
+        struct iucv_path        *path;
+        struct iucv_message     msg;
+        struct list_head        list;
+        spinlock_t              lock;
+};
 
 #define iucv_sk(__sk) ((struct iucv_sock *) __sk)
 
@@ -65,6 +71,7 @@ struct iucv_sock {
         struct iucv_path        *path;
         struct sk_buff_head     send_skb_q;
         struct sk_buff_head     backlog_skb_q;
+        struct sock_msg_q       message_q;
         unsigned int            send_tag;
 };
 
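The new struct sock_msg_q above parks an incoming IUCV path/message pair on a per-socket list whenever delivery has to be postponed. A minimal userspace sketch of the same park-and-drain pattern (plain C; the pending_msg type and the park_message()/drain_messages() helpers are hypothetical stand-ins, not the kernel's list_head/spinlock API):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct sock_msg_q: one parked message. */
struct pending_msg {
        char payload[64];
        struct pending_msg *next;
};

static struct pending_msg *queue_head, *queue_tail;

/* Park a message for later, as iucv_callback_rx does at save_message. */
static int park_message(const char *payload)
{
        struct pending_msg *m = calloc(1, sizeof(*m));

        if (!m)
                return -1;
        snprintf(m->payload, sizeof(m->payload), "%s", payload);
        if (queue_tail)
                queue_tail->next = m;
        else
                queue_head = m;
        queue_tail = m;
        return 0;
}

/* Drain parked messages in arrival order, as iucv_process_message_q does. */
static void drain_messages(void)
{
        while (queue_head) {
                struct pending_msg *m = queue_head;

                queue_head = m->next;
                if (!queue_head)
                        queue_tail = NULL;
                printf("processing: %s\n", m->payload);
                free(m);
        }
}

int main(void)
{
        park_message("first");   /* receiver busy: park instead of drop */
        park_message("second");
        drain_messages();        /* later, from recvmsg: drain in order */
        return 0;
}

The kernel version additionally serializes callers with message_q.lock, since iucv_callback_rx and iucv_sock_recvmsg can touch the list concurrently.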
211 changes: 125 additions & 86 deletions trunk/net/iucv/af_iucv.c
@@ -224,6 +224,8 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
         INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
         spin_lock_init(&iucv_sk(sk)->accept_q_lock);
         skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
+        INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
+        spin_lock_init(&iucv_sk(sk)->message_q.lock);
         skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
         iucv_sk(sk)->send_tag = 0;
 
@@ -673,6 +675,90 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
         return err;
 }
 
+static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
+{
+        int dataleft, size, copied = 0;
+        struct sk_buff *nskb;
+
+        dataleft = len;
+        while (dataleft) {
+                if (dataleft >= sk->sk_rcvbuf / 4)
+                        size = sk->sk_rcvbuf / 4;
+                else
+                        size = dataleft;
+
+                nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
+                if (!nskb)
+                        return -ENOMEM;
+
+                memcpy(nskb->data, skb->data + copied, size);
+                copied += size;
+                dataleft -= size;
+
+                skb_reset_transport_header(nskb);
+                skb_reset_network_header(nskb);
+                nskb->len = size;
+
+                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
+        }
+
+        return 0;
+}
+
+static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
+                                 struct iucv_path *path,
+                                 struct iucv_message *msg)
+{
+        int rc;
+
+        if (msg->flags & IPRMDATA) {
+                skb->data = NULL;
+                skb->len = 0;
+        } else {
+                rc = iucv_message_receive(path, msg, 0, skb->data,
+                                          msg->length, NULL);
+                if (rc) {
+                        kfree_skb(skb);
+                        return;
+                }
+                if (skb->truesize >= sk->sk_rcvbuf / 4) {
+                        rc = iucv_fragment_skb(sk, skb, msg->length);
+                        kfree_skb(skb);
+                        skb = NULL;
+                        if (rc) {
+                                iucv_path_sever(path, NULL);
+                                return;
+                        }
+                        skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
+                } else {
+                        skb_reset_transport_header(skb);
+                        skb_reset_network_header(skb);
+                        skb->len = msg->length;
+                }
+        }
+
+        if (sock_queue_rcv_skb(sk, skb))
+                skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
+}
+
+static void iucv_process_message_q(struct sock *sk)
+{
+        struct iucv_sock *iucv = iucv_sk(sk);
+        struct sk_buff *skb;
+        struct sock_msg_q *p, *n;
+
+        list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
+                skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA);
+                if (!skb)
+                        break;
+                iucv_process_message(sk, skb, p->path, &p->msg);
+                list_del(&p->list);
+                kfree(p);
+                if (!skb_queue_empty(&iucv->backlog_skb_q))
+                        break;
+        }
+}
+
 static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                              struct msghdr *msg, size_t len, int flags)
 {
@@ -684,8 +770,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
         int err = 0;
 
         if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
-            skb_queue_empty(&iucv->backlog_skb_q) &&
-            skb_queue_empty(&sk->sk_receive_queue))
+            skb_queue_empty(&iucv->backlog_skb_q) &&
+            skb_queue_empty(&sk->sk_receive_queue) &&
+            list_empty(&iucv->message_q.list))
                 return 0;
 
         if (flags & (MSG_OOB))
@@ -724,16 +811,23 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                 kfree_skb(skb);
 
                 /* Queue backlog skbs */
-                rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
+                rskb = skb_dequeue(&iucv->backlog_skb_q);
                 while (rskb) {
                         if (sock_queue_rcv_skb(sk, rskb)) {
-                                skb_queue_head(&iucv_sk(sk)->backlog_skb_q,
+                                skb_queue_head(&iucv->backlog_skb_q,
                                                rskb);
                                 break;
                         } else {
-                                rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
+                                rskb = skb_dequeue(&iucv->backlog_skb_q);
                         }
                 }
+                if (skb_queue_empty(&iucv->backlog_skb_q)) {
+                        spin_lock_bh(&iucv->message_q.lock);
+                        if (!list_empty(&iucv->message_q.list))
+                                iucv_process_message_q(sk);
+                        spin_unlock_bh(&iucv->message_q.lock);
+                }
+
         } else
                 skb_queue_head(&sk->sk_receive_queue, skb);
 
@@ -975,99 +1069,44 @@ static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
         sk->sk_state_change(sk);
 }
 
-static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len,
-                             struct sk_buff_head *fragmented_skb_q)
-{
-        int dataleft, size, copied = 0;
-        struct sk_buff *nskb;
-
-        dataleft = len;
-        while (dataleft) {
-                if (dataleft >= sk->sk_rcvbuf / 4)
-                        size = sk->sk_rcvbuf / 4;
-                else
-                        size = dataleft;
-
-                nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
-                if (!nskb)
-                        return -ENOMEM;
-
-                memcpy(nskb->data, skb->data + copied, size);
-                copied += size;
-                dataleft -= size;
-
-                skb_reset_transport_header(nskb);
-                skb_reset_network_header(nskb);
-                nskb->len = size;
-
-                skb_queue_tail(fragmented_skb_q, nskb);
-        }
-
-        return 0;
-}
-
 static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 {
         struct sock *sk = path->private;
         struct iucv_sock *iucv = iucv_sk(sk);
-        struct sk_buff *skb, *fskb;
-        struct sk_buff_head fragmented_skb_q;
-        int rc;
-
-        skb_queue_head_init(&fragmented_skb_q);
+        struct sk_buff *skb;
+        struct sock_msg_q *save_msg;
+        int len;
 
         if (sk->sk_shutdown & RCV_SHUTDOWN)
                 return;
 
+        if (!list_empty(&iucv->message_q.list) ||
+            !skb_queue_empty(&iucv->backlog_skb_q))
+                goto save_message;
+
+        len = atomic_read(&sk->sk_rmem_alloc);
+        len += msg->length + sizeof(struct sk_buff);
+        if (len > sk->sk_rcvbuf)
+                goto save_message;
+
         skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
-        if (!skb) {
-                iucv_path_sever(path, NULL);
-                return;
-        }
+        if (!skb)
+                goto save_message;
 
-        if (msg->flags & IPRMDATA) {
-                skb->data = NULL;
-                skb->len = 0;
-        } else {
-                rc = iucv_message_receive(path, msg, 0, skb->data,
-                                          msg->length, NULL);
-                if (rc) {
-                        kfree_skb(skb);
-                        return;
-                }
-                if (skb->truesize >= sk->sk_rcvbuf / 4) {
-                        rc = iucv_fragment_skb(sk, skb, msg->length,
-                                               &fragmented_skb_q);
-                        kfree_skb(skb);
-                        skb = NULL;
-                        if (rc) {
-                                iucv_path_sever(path, NULL);
-                                return;
-                        }
-                } else {
-                        skb_reset_transport_header(skb);
-                        skb_reset_network_header(skb);
-                        skb->len = msg->length;
-                }
-        }
-        /* Queue the fragmented skb */
-        fskb = skb_dequeue(&fragmented_skb_q);
-        while (fskb) {
-                if (!skb_queue_empty(&iucv->backlog_skb_q))
-                        skb_queue_tail(&iucv->backlog_skb_q, fskb);
-                else if (sock_queue_rcv_skb(sk, fskb))
-                        skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, fskb);
-                fskb = skb_dequeue(&fragmented_skb_q);
-        }
+        spin_lock(&iucv->message_q.lock);
+        iucv_process_message(sk, skb, path, msg);
+        spin_unlock(&iucv->message_q.lock);
 
-        /* Queue the original skb if it exists (was not fragmented) */
-        if (skb) {
-                if (!skb_queue_empty(&iucv->backlog_skb_q))
-                        skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
-                else if (sock_queue_rcv_skb(sk, skb))
-                        skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
-        }
+        return;
+
+save_message:
+        save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
+        save_msg->path = path;
+        save_msg->msg = *msg;
+
+        spin_lock(&iucv->message_q.lock);
+        list_add_tail(&save_msg->list, &iucv->message_q.list);
+        spin_unlock(&iucv->message_q.lock);
 }
 
 static void iucv_callback_txdone(struct iucv_path *path,
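In the receive path above, oversized messages are handed to iucv_fragment_skb, which carves the data into chunks of at most sk->sk_rcvbuf / 4 bytes so that no single skb dominates the receive buffer. A small sketch of just that size loop (plain C; the rcvbuf and len values are hypothetical):

#include <stdio.h>

/* Mirrors the loop bound in iucv_fragment_skb: every fragment is at
 * most rcvbuf / 4 bytes; the final fragment carries the remainder. */
int main(void)
{
        int rcvbuf = 65536;      /* hypothetical sk->sk_rcvbuf */
        int len = 150000;        /* hypothetical msg->length */
        int dataleft = len, copied = 0;

        while (dataleft) {
                int size = (dataleft >= rcvbuf / 4) ? rcvbuf / 4 : dataleft;

                printf("fragment at offset %6d: %5d bytes\n", copied, size);
                copied += size;
                dataleft -= size;
        }
        return 0;
}

With a 64 KiB receive buffer this yields nine 16 KiB fragments plus a 2544-byte tail, each queued on backlog_skb_q for later delivery.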
