diff --git a/[refs] b/[refs]
index 3051150b0bf1..4178ceae4344 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 79af02c2538d54ff0dcd3f43646f506207f2ee62
+refs/heads/master: dcc83a028552ac34538db52d82446d1da6ea8c22
diff --git a/trunk/arch/sparc64/kernel/dtlb_backend.S b/trunk/arch/sparc64/kernel/dtlb_backend.S
index b73a3c858770..538522848ad4 100644
--- a/trunk/arch/sparc64/kernel/dtlb_backend.S
+++ b/trunk/arch/sparc64/kernel/dtlb_backend.S
@@ -16,7 +16,7 @@
 #elif PAGE_SHIFT == 19
 #define SZ_BITS		_PAGE_SZ512K
 #elif PAGE_SHIFT == 22
-#define SZ_BITS		_PAGE_SZ4M
+#define SZ_BITS		_PAGE_SZ4MB
 #endif
 
 #define VALID_SZ_BITS	(_PAGE_VALID | SZ_BITS)
diff --git a/trunk/drivers/bluetooth/hci_vhci.c b/trunk/drivers/bluetooth/hci_vhci.c
index f9b956fb2b8b..3256192dcde8 100644
--- a/trunk/drivers/bluetooth/hci_vhci.c
+++ b/trunk/drivers/bluetooth/hci_vhci.c
@@ -120,7 +120,7 @@ static unsigned int hci_vhci_chr_poll(struct file *file, poll_table * wait)
 
 	poll_wait(file, &hci_vhci->read_wait, wait);
 
-	if (!skb_queue_empty(&hci_vhci->readq))
+	if (skb_queue_len(&hci_vhci->readq))
 		return POLLIN | POLLRDNORM;
 
 	return POLLOUT | POLLWRNORM;
diff --git a/trunk/drivers/isdn/hisax/isdnl1.c b/trunk/drivers/isdn/hisax/isdnl1.c
index bab356886483..ac899503a74f 100644
--- a/trunk/drivers/isdn/hisax/isdnl1.c
+++ b/trunk/drivers/isdn/hisax/isdnl1.c
@@ -279,8 +279,7 @@ BChannel_proc_xmt(struct BCState *bcs)
 	if (test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags))
 		st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
 	if (!test_bit(BC_FLG_ACTIV, &bcs->Flag)) {
-		if (!test_bit(BC_FLG_BUSY, &bcs->Flag) &&
-		    skb_queue_empty(&bcs->squeue)) {
+		if (!test_bit(BC_FLG_BUSY, &bcs->Flag) && (!skb_queue_len(&bcs->squeue))) {
 			st->l2.l2l1(st, PH_DEACTIVATE | CONFIRM, NULL);
 		}
 	}
diff --git a/trunk/drivers/isdn/hisax/isdnl2.c b/trunk/drivers/isdn/hisax/isdnl2.c
index 1615c1a76ab8..9022583fd6a0 100644
--- a/trunk/drivers/isdn/hisax/isdnl2.c
+++ b/trunk/drivers/isdn/hisax/isdnl2.c
@@ -108,8 +108,7 @@ static int l2addrsize(struct Layer2 *l2);
 static void
 set_peer_busy(struct Layer2 *l2) {
 	test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
-	if (!skb_queue_empty(&l2->i_queue) ||
-	    !skb_queue_empty(&l2->ui_queue))
+	if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
 		test_and_set_bit(FLG_L2BLOCK, &l2->flag);
 }
 
@@ -755,7 +754,7 @@ l2_restart_multi(struct FsmInst *fi, int event, void *arg)
 		st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL);
 
 	if ((ST_L2_7==state) || (ST_L2_8 == state))
-		if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
+		if (skb_queue_len(&st->l2.i_queue) && cansend(st))
 			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
 }
 
@@ -811,7 +810,7 @@ l2_connected(struct FsmInst *fi, int event, void *arg)
 	if (pr != -1)
 		st->l2.l2l3(st, pr, NULL);
 
-	if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
+	if (skb_queue_len(&st->l2.i_queue) && cansend(st))
 		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
 }
 
@@ -1015,7 +1014,7 @@ l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
 			if(typ != RR) FsmDelTimer(&st->l2.t203, 9);
 			restart_t200(st, 12);
 		}
-		if (!skb_queue_empty(&st->l2.i_queue) && (typ == RR))
+		if (skb_queue_len(&st->l2.i_queue) && (typ == RR))
 			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
 	} else
 		nrerrorrecovery(fi);
@@ -1121,7 +1120,7 @@ l2_got_iframe(struct FsmInst *fi, int event, void *arg)
 		return;
 	}
 
-	if (!skb_queue_empty(&st->l2.i_queue) && (fi->state == ST_L2_7))
+	if (skb_queue_len(&st->l2.i_queue) && (fi->state == ST_L2_7))
 		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
 	if (test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag))
 		enquiry_cr(st, RR, RSP, 0);
@@ -1139,7 +1138,7 @@ l2_got_tei(struct FsmInst *fi, int event, void *arg)
 		test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
 	} else
 		FsmChangeState(fi, ST_L2_4);
-	if (!skb_queue_empty(&st->l2.ui_queue))
+	if (skb_queue_len(&st->l2.ui_queue))
 		tx_ui(st);
 }
 
@@ -1302,7 +1301,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 		FsmDelTimer(&st->l2.t203, 13);
 		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 11);
 	}
-	if (!skb_queue_empty(&l2->i_queue) && cansend(st))
+	if (skb_queue_len(&l2->i_queue) && cansend(st))
 		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
 }
 
@@ -1348,7 +1347,7 @@ l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
 		}
 		invoke_retransmission(st, nr);
 		FsmChangeState(fi, ST_L2_7);
-		if (!skb_queue_empty(&l2->i_queue) && cansend(st))
+		if (skb_queue_len(&l2->i_queue) && cansend(st))
 			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
 	} else
 		nrerrorrecovery(fi);
diff --git a/trunk/drivers/isdn/hisax/isdnl3.c b/trunk/drivers/isdn/hisax/isdnl3.c
index c9917cd2132b..abcc9530eb34 100644
--- a/trunk/drivers/isdn/hisax/isdnl3.c
+++ b/trunk/drivers/isdn/hisax/isdnl3.c
@@ -302,7 +302,7 @@ release_l3_process(struct l3_process *p)
 		    !test_bit(FLG_PTP, &p->st->l2.flag)) {
 			if (p->debug)
 				l3_debug(p->st, "release_l3_process: last process");
-			if (skb_queue_empty(&p->st->l3.squeue)) {
+			if (!skb_queue_len(&p->st->l3.squeue)) {
 				if (p->debug)
 					l3_debug(p->st, "release_l3_process: release link");
 				if (p->st->protocol != ISDN_PTYPE_NI1)
diff --git a/trunk/drivers/isdn/i4l/isdn_tty.c b/trunk/drivers/isdn/i4l/isdn_tty.c
index b37ef1f06b3d..ad5aa38fb5a6 100644
--- a/trunk/drivers/isdn/i4l/isdn_tty.c
+++ b/trunk/drivers/isdn/i4l/isdn_tty.c
@@ -1223,7 +1223,7 @@ isdn_tty_write(struct tty_struct *tty, const u_char * buf, int count)
 			total += c;
 		}
 		atomic_dec(&info->xmit_lock);
-		if ((info->xmit_count) || !skb_queue_empty(&info->xmit_queue)) {
+		if ((info->xmit_count) || (skb_queue_len(&info->xmit_queue))) {
 			if (m->mdmreg[REG_DXMT] & BIT_DXMT) {
 				isdn_tty_senddown(info);
 				isdn_tty_tint(info);
@@ -1284,7 +1284,7 @@ isdn_tty_flush_chars(struct tty_struct *tty)
 
 	if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_flush_chars"))
 		return;
-	if ((info->xmit_count) || !skb_queue_empty(&info->xmit_queue))
+	if ((info->xmit_count) || (skb_queue_len(&info->xmit_queue)))
 		isdn_timer_ctrl(ISDN_TIMER_MODEMXMIT, 1);
 }
diff --git a/trunk/drivers/isdn/icn/icn.c b/trunk/drivers/isdn/icn/icn.c
index e0d1b01cc74c..9fc0c1e03732 100644
--- a/trunk/drivers/isdn/icn/icn.c
+++ b/trunk/drivers/isdn/icn/icn.c
@@ -304,12 +304,12 @@ icn_pollbchan_send(int channel, icn_card * card)
 	isdn_ctrl cmd;
 
 	if (!(card->sndcount[channel] || card->xskb[channel] ||
-	      !skb_queue_empty(&card->spqueue[channel])))
+	      skb_queue_len(&card->spqueue[channel])))
 		return;
 
 	if (icn_trymaplock_channel(card, mch)) {
 		while (sbfree && (card->sndcount[channel] ||
-		       !skb_queue_empty(&card->spqueue[channel]) ||
+		       skb_queue_len(&card->spqueue[channel]) ||
 		       card->xskb[channel])) {
 			spin_lock_irqsave(&card->lock, flags);
 			if (card->xmit_lock[channel]) {
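[Review note, not part of the patch: every hunk above rewrites a queue-emptiness test. skb_queue_len() returns the number of buffers on an sk_buff_head, so using it as a boolean is exactly equivalent to !skb_queue_empty(). A minimal sketch of the two idioms, assuming only the standard <linux/skbuff.h> helpers:]

	/* Illustration only -- equivalent emptiness tests for an skb queue.
	 * skb_queue_empty() checks the head pointer; skb_queue_len() reads
	 * the cached qlen counter.  Under the queue lock the two agree.
	 */
	static int queue_has_data(struct sk_buff_head *q)
	{
		return skb_queue_len(q) > 0;	/* same as !skb_queue_empty(q) */
	}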
diff --git a/trunk/drivers/net/hamradio/scc.c b/trunk/drivers/net/hamradio/scc.c
index c27e417f32bf..ece1b1a13186 100644
--- a/trunk/drivers/net/hamradio/scc.c
+++ b/trunk/drivers/net/hamradio/scc.c
@@ -304,7 +304,7 @@ static inline void scc_discard_buffers(struct scc_channel *scc)
 		scc->tx_buff = NULL;
 	}
 
-	while (!skb_queue_empty(&scc->tx_queue))
+	while (skb_queue_len(&scc->tx_queue))
 		dev_kfree_skb(skb_dequeue(&scc->tx_queue));
 
 	spin_unlock_irqrestore(&scc->lock, flags);
@@ -1126,7 +1126,8 @@ static void t_dwait(unsigned long channel)
 
 	if (scc->stat.tx_state == TXS_WAIT)	/* maxkeyup or idle timeout */
 	{
-		if (skb_queue_empty(&scc->tx_queue)) {	/* nothing to send */
+		if (skb_queue_len(&scc->tx_queue) == 0)	/* nothing to send */
+		{
 			scc->stat.tx_state = TXS_IDLE;
 			netif_wake_queue(scc->dev);	/* t_maxkeyup locked it. */
 			return;
diff --git a/trunk/drivers/net/ppp_async.c b/trunk/drivers/net/ppp_async.c
index 59e8183c639e..5e48b9ab3045 100644
--- a/trunk/drivers/net/ppp_async.c
+++ b/trunk/drivers/net/ppp_async.c
@@ -364,7 +364,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
 	spin_lock_irqsave(&ap->recv_lock, flags);
 	ppp_async_input(ap, buf, cflags, count);
 	spin_unlock_irqrestore(&ap->recv_lock, flags);
-	if (!skb_queue_empty(&ap->rqueue))
+	if (skb_queue_len(&ap->rqueue))
 		tasklet_schedule(&ap->tsk);
 	ap_put(ap);
 	if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
diff --git a/trunk/drivers/net/ppp_generic.c b/trunk/drivers/net/ppp_generic.c
index a32668e88e09..ab726ab43798 100644
--- a/trunk/drivers/net/ppp_generic.c
+++ b/trunk/drivers/net/ppp_generic.c
@@ -1237,8 +1237,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 		pch = list_entry(list, struct channel, clist);
 		navail += pch->avail = (pch->chan != NULL);
 		if (pch->avail) {
-			if (skb_queue_empty(&pch->file.xq) ||
-			    !pch->had_frag) {
+			if (skb_queue_len(&pch->file.xq) == 0
+			    || !pch->had_frag) {
 				pch->avail = 2;
 				++nfree;
 			}
@@ -1374,8 +1374,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 
 		/* try to send it down the channel */
 		chan = pch->chan;
-		if (!skb_queue_empty(&pch->file.xq) ||
-		    !chan->ops->start_xmit(chan, frag))
+		if (skb_queue_len(&pch->file.xq)
+		    || !chan->ops->start_xmit(chan, frag))
 			skb_queue_tail(&pch->file.xq, frag);
 		pch->had_frag = 1;
 		p += flen;
@@ -1412,7 +1412,7 @@ ppp_channel_push(struct channel *pch)
 
 	spin_lock_bh(&pch->downl);
 	if (pch->chan != 0) {
-		while (!skb_queue_empty(&pch->file.xq)) {
+		while (skb_queue_len(&pch->file.xq) > 0) {
 			skb = skb_dequeue(&pch->file.xq);
 			if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
 				/* put the packet back and try again later */
@@ -1426,7 +1426,7 @@ ppp_channel_push(struct channel *pch)
 	}
 	spin_unlock_bh(&pch->downl);
 	/* see if there is anything from the attached unit to be sent */
-	if (skb_queue_empty(&pch->file.xq)) {
+	if (skb_queue_len(&pch->file.xq) == 0) {
 		read_lock_bh(&pch->upl);
 		ppp = pch->ppp;
 		if (ppp != 0)
diff --git a/trunk/drivers/net/ppp_synctty.c b/trunk/drivers/net/ppp_synctty.c
index 4d51c0c8023d..fd9f50180355 100644
--- a/trunk/drivers/net/ppp_synctty.c
+++ b/trunk/drivers/net/ppp_synctty.c
@@ -406,7 +406,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
 	spin_lock_irqsave(&ap->recv_lock, flags);
 	ppp_sync_input(ap, buf, cflags, count);
 	spin_unlock_irqrestore(&ap->recv_lock, flags);
-	if (!skb_queue_empty(&ap->rqueue))
+	if (skb_queue_len(&ap->rqueue))
 		tasklet_schedule(&ap->tsk);
 	sp_put(ap);
 	if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
diff --git a/trunk/drivers/net/tun.c b/trunk/drivers/net/tun.c
index effab0b9adca..7bfee366297b 100644
--- a/trunk/drivers/net/tun.c
+++ b/trunk/drivers/net/tun.c
@@ -215,7 +215,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
 
 	poll_wait(file, &tun->read_wait, wait);
 
-	if (!skb_queue_empty(&tun->readq))
+	if (skb_queue_len(&tun->readq))
 		mask |= POLLIN | POLLRDNORM;
 
 	return mask;
diff --git a/trunk/drivers/net/wireless/airo.c b/trunk/drivers/net/wireless/airo.c
index 47f3c5d0203d..c12648d8192b 100644
--- a/trunk/drivers/net/wireless/airo.c
+++ b/trunk/drivers/net/wireless/airo.c
@@ -2374,7 +2374,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
 	/*
 	 * Clean out tx queue
 	 */
-	if (test_bit(FLAG_MPI, &ai->flags) && !skb_queue_empty(&ai->txq)) {
+	if (test_bit(FLAG_MPI, &ai->flags) && skb_queue_len (&ai->txq) > 0) {
 		struct sk_buff *skb = NULL;
 		for (;(skb = skb_dequeue(&ai->txq));)
 			dev_kfree_skb(skb);
@@ -3287,7 +3287,7 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
 			if (status & EV_TXEXC)
 				get_tx_error(apriv, -1);
 			spin_lock_irqsave(&apriv->aux_lock, flags);
-			if (!skb_queue_empty(&apriv->txq)) {
+			if (skb_queue_len (&apriv->txq)) {
 				spin_unlock_irqrestore(&apriv->aux_lock,flags);
 				mpi_send_packet (dev);
 			} else {
diff --git a/trunk/drivers/s390/net/claw.c b/trunk/drivers/s390/net/claw.c
index 24c0af49c25c..60440dbe3a27 100644
--- a/trunk/drivers/s390/net/claw.c
+++ b/trunk/drivers/s390/net/claw.c
@@ -428,7 +428,7 @@ claw_pack_skb(struct claw_privbk *privptr)
 	new_skb = NULL;		/* assume no dice */
 	pkt_cnt = 0;
 	CLAW_DBF_TEXT(4,trace,"PackSKBe");
-	if (!skb_queue_empty(&p_ch->collect_queue)) {
+	if (skb_queue_len(&p_ch->collect_queue) > 0) {
 		/* some data */
 		held_skb = skb_dequeue(&p_ch->collect_queue);
 		if (p_env->packing != DO_PACKED)
@@ -1254,7 +1254,7 @@ claw_write_next ( struct chbk * p_ch )
 	privptr = (struct claw_privbk *) dev->priv;
 	claw_free_wrt_buf( dev );
 	if ((privptr->write_free_count > 0) &&
-	    !skb_queue_empty(&p_ch->collect_queue)) {
+	    (skb_queue_len(&p_ch->collect_queue) > 0)) {
 		pk_skb = claw_pack_skb(privptr);
 		while (pk_skb != NULL) {
 			rc = claw_hw_tx( pk_skb, dev,1);
diff --git a/trunk/drivers/s390/net/ctctty.c b/trunk/drivers/s390/net/ctctty.c
index 968f2c113efe..3080393e823d 100644
--- a/trunk/drivers/s390/net/ctctty.c
+++ b/trunk/drivers/s390/net/ctctty.c
@@ -156,7 +156,7 @@ ctc_tty_readmodem(ctc_tty_info *info)
 				skb_queue_head(&info->rx_queue, skb);
 			else {
 				kfree_skb(skb);
-				ret = !skb_queue_empty(&info->rx_queue);
+				ret = skb_queue_len(&info->rx_queue);
 			}
 		}
 	}
@@ -530,7 +530,7 @@ ctc_tty_write(struct tty_struct *tty, const u_char * buf, int count)
 		total += c;
 		count -= c;
 	}
-	if (!skb_queue_empty(&info->tx_queue)) {
+	if (skb_queue_len(&info->tx_queue)) {
 		info->lsr &= ~UART_LSR_TEMT;
 		tasklet_schedule(&info->tasklet);
 	}
@@ -594,7 +594,7 @@ ctc_tty_flush_chars(struct tty_struct *tty)
 		return;
 	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_chars"))
 		return;
-	if (tty->stopped || tty->hw_stopped || skb_queue_empty(&info->tx_queue))
+	if (tty->stopped || tty->hw_stopped || (!skb_queue_len(&info->tx_queue)))
 		return;
 	tasklet_schedule(&info->tasklet);
 }
diff --git a/trunk/drivers/usb/net/usbnet.c b/trunk/drivers/usb/net/usbnet.c
index 576f3b852fce..8a945f4f3693 100644
--- a/trunk/drivers/usb/net/usbnet.c
+++ b/trunk/drivers/usb/net/usbnet.c
@@ -3227,9 +3227,9 @@ static int usbnet_stop (struct net_device *net)
 	temp = unlink_urbs (dev, &dev->txq) + unlink_urbs (dev, &dev->rxq);
 
 	// maybe wait for deletions to finish.
-	while (!skb_queue_empty(&dev->rxq) &&
-	       !skb_queue_empty(&dev->txq) &&
-	       !skb_queue_empty(&dev->done)) {
+	while (skb_queue_len (&dev->rxq)
+			&& skb_queue_len (&dev->txq)
+			&& skb_queue_len (&dev->done)) {
 		msleep(UNLINK_TIMEOUT_MS);
 		if (netif_msg_ifdown (dev))
 			devdbg (dev, "waited for %d urb completions", temp);
diff --git a/trunk/include/linux/igmp.h b/trunk/include/linux/igmp.h
index 0c31ef0b5bad..390e760a96d3 100644
--- a/trunk/include/linux/igmp.h
+++ b/trunk/include/linux/igmp.h
@@ -148,6 +148,7 @@ struct ip_sf_socklist
 struct ip_mc_socklist
 {
 	struct ip_mc_socklist	*next;
+	int			count;
 	struct ip_mreqn		multi;
 	unsigned int		sfmode;		/* MCAST_{INCLUDE,EXCLUDE} */
 	struct ip_sf_socklist	*sflist;
diff --git a/trunk/include/linux/skbuff.h b/trunk/include/linux/skbuff.h
index 5d4a990d5577..14b950413495 100644
--- a/trunk/include/linux/skbuff.h
+++ b/trunk/include/linux/skbuff.h
@@ -300,26 +300,20 @@ struct sk_buff {
 #include <asm/system.h>
 
 extern void	       __kfree_skb(struct sk_buff *skb);
-extern struct sk_buff *alloc_skb(unsigned int size,
-				 unsigned int __nocast priority);
+extern struct sk_buff *alloc_skb(unsigned int size, int priority);
 extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
-					    unsigned int size,
-					    unsigned int __nocast priority);
+					    unsigned int size, int priority);
 extern void	       kfree_skbmem(struct sk_buff *skb);
-extern struct sk_buff *skb_clone(struct sk_buff *skb,
-				 unsigned int __nocast priority);
-extern struct sk_buff *skb_copy(const struct sk_buff *skb,
-				unsigned int __nocast priority);
-extern struct sk_buff *pskb_copy(struct sk_buff *skb,
-				 unsigned int __nocast gfp_mask);
+extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority);
+extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority);
+extern struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask);
 extern int	       pskb_expand_head(struct sk_buff *skb,
-					int nhead, int ntail,
-					unsigned int __nocast gfp_mask);
+					int nhead, int ntail, int gfp_mask);
 extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
 					    unsigned int headroom);
 extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 				       int newheadroom, int newtailroom,
-				       unsigned int __nocast priority);
+				       int priority);
 extern struct sk_buff *		skb_pad(struct sk_buff *skb, int pad);
 #define dev_kfree_skb(a)	kfree_skb(a)
 extern void	      skb_over_panic(struct sk_buff *skb, int len,
@@ -470,8 +464,7 @@ static inline int skb_shared(const struct sk_buff *skb)
 *
 *	NULL is returned on a memory allocation failure.
 */
-static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
-					      unsigned int __nocast pri)
+static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
 {
 	might_sleep_if(pri & __GFP_WAIT);
 	if (skb_shared(skb)) {
@@ -1008,7 +1001,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
 *	%NULL is returned in there is no free memory.
 */
 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
-					      unsigned int __nocast gfp_mask)
+					      int gfp_mask)
 {
 	struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
 	if (likely(skb))
@@ -1121,8 +1114,8 @@ static inline int skb_can_coalesce(struct sk_buff *skb, int i,
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
-extern int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp);
-static inline int skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp)
+extern int __skb_linearize(struct sk_buff *skb, int gfp);
+static inline int skb_linearize(struct sk_buff *skb, int gfp)
 {
 	return __skb_linearize(skb, gfp);
 }
diff --git a/trunk/include/net/irda/irda_device.h b/trunk/include/net/irda/irda_device.h
index 92c828029cd8..71d6af83b631 100644
--- a/trunk/include/net/irda/irda_device.h
+++ b/trunk/include/net/irda/irda_device.h
@@ -224,7 +224,7 @@ int irda_device_is_receiving(struct net_device *dev);
 /* Interface for internal use */
 static inline int irda_device_txqueue_empty(const struct net_device *dev)
 {
-	return skb_queue_empty(&dev->qdisc->q);
+	return (skb_queue_len(&dev->qdisc->q) == 0);
 }
 int  irda_device_set_raw_mode(struct net_device* self, int status);
 struct net_device *alloc_irdadev(int sizeof_priv);
diff --git a/trunk/include/net/sctp/structs.h b/trunk/include/net/sctp/structs.h
index 7435528a1747..47727c7cc628 100644
--- a/trunk/include/net/sctp/structs.h
+++ b/trunk/include/net/sctp/structs.h
@@ -582,6 +582,7 @@ void sctp_datamsg_track(struct sctp_chunk *);
 void sctp_chunk_fail(struct sctp_chunk *, int error);
 int sctp_chunk_abandoned(struct sctp_chunk *);
 
+
 /* RFC2960 1.4 Key Terms
 *
 * o Chunk: A unit of information within an SCTP packet, consisting of
@@ -591,8 +592,13 @@ int sctp_chunk_abandoned(struct sctp_chunk *);
 * each chunk as well as a few other header pointers...
 */
 struct sctp_chunk {
-	struct list_head list;
-
+	/* These first three elements MUST PRECISELY match the first
+	 * three elements of struct sk_buff.  This allows us to reuse
+	 * all the skb_* queue management functions.
+	 */
+	struct sctp_chunk *next;
+	struct sctp_chunk *prev;
+	struct sk_buff_head *list;
 	atomic_t refcnt;
 
 	/* This is our link to the per-transport transmitted list. */
@@ -711,7 +717,7 @@ struct sctp_packet {
 	__u32 vtag;
 
 	/* This contains the payload chunks.  */
-	struct list_head chunk_list;
+	struct sk_buff_head chunks;
 
 	/* This is the overhead of the sctp and ip headers. */
 	size_t overhead;
@@ -968,7 +974,7 @@ struct sctp_inq {
 	/* This is actually a queue of sctp_chunk each
 	 * containing a partially decoded packet.
 	 */
-	struct list_head in_chunk_list;
+	struct sk_buff_head in;
 	/* This is the packet which is currently off the in queue and is
 	 * being worked on through the inbound chunk processing.
 	 */
@@ -1011,7 +1017,7 @@ struct sctp_outq {
 	struct sctp_association *asoc;
 
 	/* Data pending that has never been transmitted.  */
-	struct list_head out_chunk_list;
+	struct sk_buff_head out;
 
 	unsigned out_qlen;	/* Total length of queued data chunks. */
@@ -1019,7 +1025,7 @@ struct sctp_outq {
 	unsigned error;
 
 	/* These are control chunks we want to send.  */
-	struct list_head control_chunk_list;
+	struct sk_buff_head control;
 
 	/* These are chunks that have been sacked but are above the
 	 * CTSN, or cumulative tsn ack point.
@@ -1666,7 +1672,7 @@ struct sctp_association {
 	 * which already resides in sctp_outq.	Please move this
 	 * queue and its supporting logic down there.	--piggy]
 	 */
-	struct list_head addip_chunk_list;
+	struct sk_buff_head addip_chunks;
 
 	/* ADDIP Section 4.1 ASCONF Chunk Procedures
 	 *
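[Review note, illustrative sketch only: the new struct sctp_chunk layout above deliberately mirrors the first three members of struct sk_buff (next, prev, list) so that a chunk pointer can be cast to a struct sk_buff pointer and pushed through the generic skb_* queue helpers, which is exactly what the later net/sctp hunks do. The helper name below is hypothetical; it only shows the cast that this layout makes legal:]

	/* Sketch: only valid because struct sctp_chunk's first three
	 * members are declared to match struct sk_buff, as the comment
	 * inside the structure demands.
	 */
	static void sctp_queue_chunk(struct sk_buff_head *q,
				     struct sctp_chunk *chunk)
	{
		skb_queue_tail(q, (struct sk_buff *) chunk);
	}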
diff --git a/trunk/include/net/sock.h b/trunk/include/net/sock.h
index a1042d08becd..7b76f891ae2d 100644
--- a/trunk/include/net/sock.h
+++ b/trunk/include/net/sock.h
@@ -684,17 +684,16 @@ extern void FASTCALL(release_sock(struct sock *sk));
 #define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
 #define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
 
-extern struct sock		*sk_alloc(int family,
-					  unsigned int __nocast priority,
+extern struct sock		*sk_alloc(int family, int priority,
 					  struct proto *prot, int zero_it);
 extern void			sk_free(struct sock *sk);
 
 extern struct sk_buff		*sock_wmalloc(struct sock *sk,
 					      unsigned long size, int force,
-					      unsigned int __nocast priority);
+					      int priority);
 extern struct sk_buff		*sock_rmalloc(struct sock *sk,
 					      unsigned long size, int force,
-					      unsigned int __nocast priority);
+					      int priority);
 extern void			sock_wfree(struct sk_buff *skb);
 extern void			sock_rfree(struct sk_buff *skb);
@@ -709,8 +708,7 @@ extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
 						     unsigned long size,
 						     int noblock,
 						     int *errcode);
-extern void *sock_kmalloc(struct sock *sk, int size,
-			  unsigned int __nocast priority);
+extern void *sock_kmalloc(struct sock *sk, int size, int priority);
 extern void sock_kfree_s(struct sock *sk, void *mem, int size);
 extern void sk_send_sigurg(struct sock *sk);
@@ -1134,8 +1132,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 }
 
 static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
-						   int size, int mem,
-						   unsigned int __nocast gfp)
+						   int size, int mem, int gfp)
 {
 	struct sk_buff *skb;
 	int hdr_len;
@@ -1158,8 +1155,7 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
 }
 
 static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
-						  int size,
-						  unsigned int __nocast gfp)
+						  int size, int gfp)
 {
 	return sk_stream_alloc_pskb(sk, size, 0, gfp);
 }
@@ -1192,7 +1188,7 @@ static inline int sock_writeable(const struct sock *sk)
 	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
 }
 
-static inline unsigned int __nocast gfp_any(void)
+static inline int gfp_any(void)
 {
 	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
 }
diff --git a/trunk/include/net/tcp.h b/trunk/include/net/tcp.h
index f4f9aba07ac2..a166918ca56d 100644
--- a/trunk/include/net/tcp.h
+++ b/trunk/include/net/tcp.h
@@ -860,8 +860,7 @@ extern void tcp_send_probe0(struct sock *);
 extern void tcp_send_partial(struct sock *);
 extern int tcp_write_wakeup(struct sock *);
 extern void tcp_send_fin(struct sock *sk);
-extern void tcp_send_active_reset(struct sock *sk,
-				  unsigned int __nocast priority);
+extern void tcp_send_active_reset(struct sock *sk, int priority);
 extern int tcp_send_synack(struct sock *);
 extern void tcp_push_one(struct sock *, unsigned int mss_now);
 extern void tcp_send_ack(struct sock *sk);
@@ -992,7 +991,7 @@ static __inline__ void tcp_fast_path_on(struct tcp_sock *tp)
 
 static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp)
 {
-	if (skb_queue_empty(&tp->out_of_order_queue) &&
+	if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
 	    tp->rcv_wnd &&
 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
 	    !tp->urg_data)
diff --git a/trunk/net/bluetooth/cmtp/core.c b/trunk/net/bluetooth/cmtp/core.c
index 901eff7ebe74..2e341de3e763 100644
--- a/trunk/net/bluetooth/cmtp/core.c
+++ b/trunk/net/bluetooth/cmtp/core.c
@@ -213,7 +213,7 @@ static int cmtp_send_frame(struct cmtp_session *session, unsigned char *data, in
 	return kernel_sendmsg(sock, &msg, &iv, 1, len);
 }
 
-static void cmtp_process_transmit(struct cmtp_session *session)
+static int cmtp_process_transmit(struct cmtp_session *session)
 {
 	struct sk_buff *skb, *nskb;
 	unsigned char *hdr;
@@ -223,7 +223,7 @@ static void cmtp_process_transmit(struct cmtp_session *session)
 
 	if (!(nskb = alloc_skb(session->mtu, GFP_ATOMIC))) {
 		BT_ERR("Can't allocate memory for new frame");
-		return;
+		return -ENOMEM;
 	}
 
 	while ((skb = skb_dequeue(&session->transmit))) {
@@ -275,6 +275,8 @@ static void cmtp_process_transmit(struct cmtp_session *session)
 
 	cmtp_send_frame(session, nskb->data, nskb->len);
 	kfree_skb(nskb);
+
+	return skb_queue_len(&session->transmit);
 }
 
 static int cmtp_session(void *arg)
diff --git a/trunk/net/bluetooth/hidp/core.c b/trunk/net/bluetooth/hidp/core.c
index de8af5f42394..affbc55462e8 100644
--- a/trunk/net/bluetooth/hidp/core.c
+++ b/trunk/net/bluetooth/hidp/core.c
@@ -428,7 +428,7 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len)
 	return kernel_sendmsg(sock, &msg, &iv, 1, len);
 }
 
-static void hidp_process_transmit(struct hidp_session *session)
+static int hidp_process_transmit(struct hidp_session *session)
 {
 	struct sk_buff *skb;
 
@@ -453,6 +453,9 @@ static void hidp_process_transmit(struct hidp_session *session)
 		hidp_set_timer(session);
 		kfree_skb(skb);
 	}
+
+	return skb_queue_len(&session->ctrl_transmit) +
+				skb_queue_len(&session->intr_transmit);
 }
 
 static int hidp_session(void *arg)
diff --git a/trunk/net/bluetooth/rfcomm/sock.c b/trunk/net/bluetooth/rfcomm/sock.c
index 63a123c5c41b..f3f6355a2786 100644
--- a/trunk/net/bluetooth/rfcomm/sock.c
+++ b/trunk/net/bluetooth/rfcomm/sock.c
@@ -590,11 +590,8 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
-		if (!skb_queue_empty(&sk->sk_receive_queue) ||
-		    sk->sk_err ||
-		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
-		    signal_pending(current) ||
-		    !timeo)
+		if (skb_queue_len(&sk->sk_receive_queue) || sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN) ||
+				signal_pending(current) || !timeo)
 			break;
 
 		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
diff --git a/trunk/net/bluetooth/rfcomm/tty.c b/trunk/net/bluetooth/rfcomm/tty.c
index 6304590fd36a..6d689200bcf3 100644
--- a/trunk/net/bluetooth/rfcomm/tty.c
+++ b/trunk/net/bluetooth/rfcomm/tty.c
@@ -781,7 +781,7 @@ static int rfcomm_tty_chars_in_buffer(struct tty_struct *tty)
 
 	BT_DBG("tty %p dev %p", tty, dev);
 
-	if (!skb_queue_empty(&dlc->tx_queue))
+	if (skb_queue_len(&dlc->tx_queue))
 		return dlc->mtu;
 
 	return 0;
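[Review note: cmtp_process_transmit() and hidp_process_transmit() now return the number of frames still queued (or -ENOMEM) instead of void. A hedged sketch of how a session thread might use that value; the wait-queue field name is hypothetical and the real cmtp_session() loop is not shown in these hunks:]

	/* Sketch only: reschedule transmit work while frames remain queued. */
	if (cmtp_process_transmit(session) > 0)
		wake_up_interruptible(&session->wait);	/* hypothetical wakeup */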
diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c
index ff9dc029233a..7f5f62c65115 100644
--- a/trunk/net/core/dev.c
+++ b/trunk/net/core/dev.c
@@ -1127,7 +1127,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 extern void skb_release_data(struct sk_buff *);
 
 /* Keep head the same: replace data */
-int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp_mask)
+int __skb_linearize(struct sk_buff *skb, int gfp_mask)
 {
 	unsigned int size;
 	u8 *data;
diff --git a/trunk/net/core/skbuff.c b/trunk/net/core/skbuff.c
index d9f7b06fe886..733deee24b9f 100644
--- a/trunk/net/core/skbuff.c
+++ b/trunk/net/core/skbuff.c
@@ -129,7 +129,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
-struct sk_buff *alloc_skb(unsigned int size, unsigned int __nocast gfp_mask)
+struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
 {
 	struct sk_buff *skb;
 	u8 *data;
@@ -182,8 +182,7 @@ struct sk_buff *alloc_skb(unsigned int size, unsigned int __nocast gfp_mask)
 *	%GFP_ATOMIC.
 */
 struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
-				     unsigned int size,
-				     unsigned int __nocast gfp_mask)
+				     unsigned int size, int gfp_mask)
 {
 	struct sk_buff *skb;
 	u8 *data;
@@ -323,7 +322,7 @@ void __kfree_skb(struct sk_buff *skb)
 *	%GFP_ATOMIC.
 */
 
-struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask)
+struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
 {
 	struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
@@ -461,7 +460,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 *	header is going to be modified. Use pskb_copy() instead.
 */
 
-struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_mask)
+struct sk_buff *skb_copy(const struct sk_buff *skb, int gfp_mask)
 {
 	int headerlen = skb->data - skb->head;
 	/*
@@ -500,7 +499,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_ma
 *	The returned buffer has a reference count of 1.
 */
 
-struct sk_buff *pskb_copy(struct sk_buff *skb, unsigned int __nocast gfp_mask)
+struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask)
 {
 	/*
 	 *	Allocate the copy buffer
@@ -558,8 +557,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, unsigned int __nocast gfp_mask)
 *	reloaded after call to this function.
 */
 
-int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
-		     unsigned int __nocast gfp_mask)
+int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask)
 {
 	int i;
 	u8 *data;
@@ -649,8 +647,7 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
 *	only by netfilter in the cases when checksum is recalculated? --ANK
 */
 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
-				int newheadroom, int newtailroom,
-				unsigned int __nocast gfp_mask)
+				int newheadroom, int newtailroom, int gfp_mask)
 {
 	/*
 	 *	Allocate the copy buffer
diff --git a/trunk/net/core/sock.c b/trunk/net/core/sock.c
index 8b35ccdc2b3b..a6ec3ada7f9e 100644
--- a/trunk/net/core/sock.c
+++ b/trunk/net/core/sock.c
@@ -622,8 +622,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 *	@prot: struct proto associated with this new sock instance
 *	@zero_it: if we should zero the newly allocated sock
 */
-struct sock *sk_alloc(int family, unsigned int __nocast priority,
-		      struct proto *prot, int zero_it)
+struct sock *sk_alloc(int family, int priority, struct proto *prot, int zero_it)
 {
 	struct sock *sk = NULL;
 	kmem_cache_t *slab = prot->slab;
@@ -751,8 +750,7 @@ unsigned long sock_i_ino(struct sock *sk)
 /*
 * Allocate a skb from the socket's send buffer.
 */
-struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
-			     unsigned int __nocast priority)
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority)
 {
 	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
 		struct sk_buff * skb = alloc_skb(size, priority);
@@ -767,8 +765,7 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
 /*
 * Allocate a skb from the socket's receive buffer.
 */
-struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
-			     unsigned int __nocast priority)
+struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority)
 {
 	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
 		struct sk_buff *skb = alloc_skb(size, priority);
@@ -783,7 +780,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
 /*
 * Allocate a memory block from the socket's option memory buffer.
 */
-void *sock_kmalloc(struct sock *sk, int size, unsigned int __nocast priority)
+void *sock_kmalloc(struct sock *sk, int size, int priority)
 {
 	if ((unsigned)size <= sysctl_optmem_max &&
 	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
diff --git a/trunk/net/decnet/af_decnet.c b/trunk/net/decnet/af_decnet.c
index 96a02800cd28..29bb3cd21965 100644
--- a/trunk/net/decnet/af_decnet.c
+++ b/trunk/net/decnet/af_decnet.c
@@ -536,7 +536,7 @@ static void dn_keepalive(struct sock *sk)
 	 * we are double checking that we are not sending too
 	 * many of these keepalive frames.
 	 */
-	if (skb_queue_empty(&scp->other_xmit_queue))
+	if (skb_queue_len(&scp->other_xmit_queue) == 0)
 		dn_nsp_send_link(sk, DN_NOCHANGE, 0);
 }
 
@@ -1191,7 +1191,7 @@ static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table
 	struct dn_scp *scp = DN_SK(sk);
 	int mask = datagram_poll(file, sock, wait);
 
-	if (!skb_queue_empty(&scp->other_receive_queue))
+	if (skb_queue_len(&scp->other_receive_queue))
 		mask |= POLLRDBAND;
 
 	return mask;
@@ -1214,7 +1214,7 @@ static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 	case SIOCATMARK:
 		lock_sock(sk);
-		val = !skb_queue_empty(&scp->other_receive_queue);
+		val = (skb_queue_len(&scp->other_receive_queue) != 0);
 		if (scp->state != DN_RUN)
 			val = -ENOTCONN;
 		release_sock(sk);
@@ -1630,7 +1630,7 @@ static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int
 	int len = 0;
 
 	if (flags & MSG_OOB)
-		return !skb_queue_empty(q) ? 1 : 0;
+		return skb_queue_len(q) ? 1 : 0;
 
 	while(skb != (struct sk_buff *)q) {
 		struct dn_skb_cb *cb = DN_SKB_CB(skb);
@@ -1707,7 +1707,7 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
 		if (sk->sk_err)
 			goto out;
 
-		if (!skb_queue_empty(&scp->other_receive_queue)) {
+		if (skb_queue_len(&scp->other_receive_queue)) {
 			if (!(flags & MSG_OOB)) {
 				msg->msg_flags |= MSG_OOB;
 				if (!scp->other_report) {
diff --git a/trunk/net/decnet/dn_nsp_out.c b/trunk/net/decnet/dn_nsp_out.c
index 8cce1fdbda90..42abbf3f524f 100644
--- a/trunk/net/decnet/dn_nsp_out.c
+++ b/trunk/net/decnet/dn_nsp_out.c
@@ -342,8 +342,7 @@ int dn_nsp_xmit_timeout(struct sock *sk)
 
 	dn_nsp_output(sk);
 
-	if (!skb_queue_empty(&scp->data_xmit_queue) ||
-	    !skb_queue_empty(&scp->other_xmit_queue))
+	if (skb_queue_len(&scp->data_xmit_queue) || skb_queue_len(&scp->other_xmit_queue))
 		scp->persist = dn_nsp_persist(sk);
 
 	return 0;
diff --git a/trunk/net/ipv4/icmp.c b/trunk/net/ipv4/icmp.c
index 279f57abfecb..cb759484979d 100644
--- a/trunk/net/ipv4/icmp.c
+++ b/trunk/net/ipv4/icmp.c
@@ -970,8 +970,7 @@ int icmp_rcv(struct sk_buff *skb)
 		 * RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently
 		 *	discarded if to broadcast/multicast.
 		 */
-		if ((icmph->type == ICMP_ECHO ||
-		     icmph->type == ICMP_TIMESTAMP) &&
+		if (icmph->type == ICMP_ECHO &&
 		    sysctl_icmp_echo_ignore_broadcasts) {
 			goto error;
 		}
diff --git a/trunk/net/ipv4/igmp.c b/trunk/net/ipv4/igmp.c
index 5088f90835ae..1f3183168a90 100644
--- a/trunk/net/ipv4/igmp.c
+++ b/trunk/net/ipv4/igmp.c
@@ -1615,10 +1615,9 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
 {
 	int err;
 	u32 addr = imr->imr_multiaddr.s_addr;
-	struct ip_mc_socklist *iml=NULL, *i;
+	struct ip_mc_socklist *iml, *i;
 	struct in_device *in_dev;
 	struct inet_sock *inet = inet_sk(sk);
-	int ifindex;
 	int count = 0;
 
 	if (!MULTICAST(addr))
@@ -1634,30 +1633,37 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
 		goto done;
 	}
 
+	iml = (struct ip_mc_socklist *)sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
+
 	err = -EADDRINUSE;
-	ifindex = imr->imr_ifindex;
 	for (i = inet->mc_list; i; i = i->next) {
-		if (i->multi.imr_multiaddr.s_addr == addr &&
-		    i->multi.imr_ifindex == ifindex)
+		if (memcmp(&i->multi, imr, sizeof(*imr)) == 0) {
+			/* New style additions are reference counted */
+			if (imr->imr_address.s_addr == 0) {
+				i->count++;
+				err = 0;
+			}
 			goto done;
+		}
 		count++;
 	}
 	err = -ENOBUFS;
-	if (count >= sysctl_igmp_max_memberships)
-		goto done;
-	iml = (struct ip_mc_socklist *)sock_kmalloc(sk,sizeof(*iml),GFP_KERNEL);
-	if (iml == NULL)
+	if (iml == NULL || count >= sysctl_igmp_max_memberships)
 		goto done;
-
 	memcpy(&iml->multi, imr, sizeof(*imr));
 	iml->next = inet->mc_list;
+	iml->count = 1;
 	iml->sflist = NULL;
 	iml->sfmode = MCAST_EXCLUDE;
 	inet->mc_list = iml;
 	ip_mc_inc_group(in_dev, addr);
+	iml = NULL;
 	err = 0;
+
 done:
 	rtnl_shunlock();
+	if (iml)
+		sock_kfree_s(sk, iml, sizeof(*iml));
 	return err;
 }
@@ -1687,25 +1693,30 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_mc_socklist *iml, **imlp;
-	struct in_device *in_dev;
-	u32 group = imr->imr_multiaddr.s_addr;
-	u32 ifindex;
 
 	rtnl_lock();
-	in_dev = ip_mc_find_dev(imr);
-	if (!in_dev) {
-		rtnl_unlock();
-		return -ENODEV;
-	}
-	ifindex = imr->imr_ifindex;
 	for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
-		if (iml->multi.imr_multiaddr.s_addr == group &&
-		    iml->multi.imr_ifindex == ifindex) {
-			(void) ip_mc_leave_src(sk, iml, in_dev);
+		if (iml->multi.imr_multiaddr.s_addr==imr->imr_multiaddr.s_addr &&
+		    iml->multi.imr_address.s_addr==imr->imr_address.s_addr &&
+		    (!imr->imr_ifindex || iml->multi.imr_ifindex==imr->imr_ifindex)) {
+			struct in_device *in_dev;
+
+			in_dev = inetdev_by_index(iml->multi.imr_ifindex);
+			if (in_dev)
+				(void) ip_mc_leave_src(sk, iml, in_dev);
+			if (--iml->count) {
+				rtnl_unlock();
+				if (in_dev)
+					in_dev_put(in_dev);
+				return 0;
+			}
+
 			*imlp = iml->next;
-			ip_mc_dec_group(in_dev, group);
+
+			if (in_dev) {
+				ip_mc_dec_group(in_dev, imr->imr_multiaddr.s_addr);
+				in_dev_put(in_dev);
+			}
 			rtnl_unlock();
 			sock_kfree_s(sk, iml, sizeof(*iml));
 			return 0;
@@ -1725,7 +1736,6 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 	struct in_device *in_dev = NULL;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_sf_socklist *psl;
-	int leavegroup = 0;
 	int i, j, rv;
 
 	if (!MULTICAST(addr))
@@ -1745,20 +1755,15 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 
 	err = -EADDRNOTAVAIL;
 
 	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
-		if (pmc->multi.imr_multiaddr.s_addr == imr.imr_multiaddr.s_addr
-		    && pmc->multi.imr_ifindex == imr.imr_ifindex)
+		if (memcmp(&pmc->multi, mreqs, 2*sizeof(__u32)) == 0)
 			break;
 	}
-	if (!pmc) {		/* must have a prior join */
-		err = -EINVAL;
+	if (!pmc)		/* must have a prior join */
 		goto done;
-	}
 	/* if a source filter was set, must be the same mode as before */
 	if (pmc->sflist) {
-		if (pmc->sfmode != omode) {
-			err = -EINVAL;
+		if (pmc->sfmode != omode)
 			goto done;
-		}
 	} else if (pmc->sfmode != omode) {
 		/* allow mode switches for empty-set filters */
 		ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
@@ -1770,7 +1775,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 	psl = pmc->sflist;
 	if (!add) {
 		if (!psl)
-			goto done;	/* err = -EADDRNOTAVAIL */
+			goto done;
 		rv = !0;
 		for (i=0; i<psl->sl_count; i++) {
 			rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
@@ -1779,13 +1784,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 				break;
 		}
 		if (rv)	/* source not found */
-			goto done;	/* err = -EADDRNOTAVAIL */
-
-		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
-		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
-			leavegroup = 1;
 			goto done;
-		}
 
 		/* update the interface filter */
 		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
@@ -1843,21 +1842,18 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 		&mreqs->imr_sourceaddr, 1);
 done:
 	rtnl_shunlock();
-	if (leavegroup)
-		return ip_mc_leave_group(sk, &imr);
 	return err;
 }
 
 int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 {
-	int err = 0;
+	int err;
 	struct ip_mreqn	imr;
 	u32 addr = msf->imsf_multiaddr;
 	struct ip_mc_socklist *pmc;
 	struct in_device *in_dev;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ip_sf_socklist *newpsl, *psl;
-	int leavegroup = 0;
 
 	if (!MULTICAST(addr))
 		return -EINVAL;
@@ -1876,22 +1872,15 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 		err = -ENODEV;
 		goto done;
 	}
-
-	/* special case - (INCLUDE, empty) == LEAVE_GROUP */
-	if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) {
-		leavegroup = 1;
-		goto done;
-	}
+	err = -EADDRNOTAVAIL;
 
 	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
 		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
 		    pmc->multi.imr_ifindex == imr.imr_ifindex)
 			break;
 	}
-	if (!pmc) {		/* must have a prior join */
-		err = -EINVAL;
+	if (!pmc)		/* must have a prior join */
 		goto done;
-	}
 	if (msf->imsf_numsrc) {
 		newpsl = (struct ip_sf_socklist *)sock_kmalloc(sk,
 				IP_SFLSIZE(msf->imsf_numsrc), GFP_KERNEL);
@@ -1920,11 +1909,8 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 				0, NULL, 0);
 	pmc->sflist = newpsl;
 	pmc->sfmode = msf->imsf_fmode;
-	err = 0;
 done:
 	rtnl_shunlock();
-	if (leavegroup)
-		err = ip_mc_leave_group(sk, &imr);
 	return err;
 }
diff --git a/trunk/net/ipv4/ip_sockglue.c b/trunk/net/ipv4/ip_sockglue.c
index fc7c481d0d79..f8b172f89811 100644
--- a/trunk/net/ipv4/ip_sockglue.c
+++ b/trunk/net/ipv4/ip_sockglue.c
@@ -677,11 +677,11 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 			mreq.imr_address.s_addr = mreqs.imr_interface;
 			mreq.imr_ifindex = 0;
 			err = ip_mc_join_group(sk, &mreq);
-			if (err && err != -EADDRINUSE)
+			if (err)
 				break;
 			omode = MCAST_INCLUDE;
 			add = 1;
-		} else /* IP_DROP_SOURCE_MEMBERSHIP */ {
+		} else /*IP_DROP_SOURCE_MEMBERSHIP */ {
 			omode = MCAST_INCLUDE;
 			add = 0;
 		}
@@ -754,7 +754,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 			mreq.imr_address.s_addr = 0;
 			mreq.imr_ifindex = greqs.gsr_interface;
 			err = ip_mc_join_group(sk, &mreq);
-			if (err && err != -EADDRINUSE)
+			if (err)
 				break;
 			greqs.gsr_interface = mreq.imr_ifindex;
 			omode = MCAST_INCLUDE;
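[Review note: the igmp.c hunks above make old-style joins reference counted. A repeated IP_ADD_MEMBERSHIP with imr_address == 0 now bumps iml->count instead of failing with -EADDRINUSE, and ip_mc_leave_group() only unlinks the entry once --iml->count reaches zero, which is why the ip_sockglue.c callers stop special-casing -EADDRINUSE. The counting scheme in miniature, using a hypothetical struct:]

	/* Minimal model of the membership refcount (hypothetical type). */
	struct membership { int count; };

	static void join_again(struct membership *m) { m->count++; }
	static int leave(struct membership *m)	/* nonzero: last reference gone */
	{
		return --m->count == 0;
	}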
diff --git a/trunk/net/ipv4/tcp.c b/trunk/net/ipv4/tcp.c
index ddb6ce4ecff2..29894c749163 100644
--- a/trunk/net/ipv4/tcp.c
+++ b/trunk/net/ipv4/tcp.c
@@ -1105,7 +1105,7 @@ static void tcp_prequeue_process(struct sock *sk)
 	struct sk_buff *skb;
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
+	NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
 
 	/* RX process wants to run with disabled BHs, though it is not
 	 * necessary */
@@ -1369,7 +1369,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			 * is not empty. It is more elegant, but eats cycles,
 			 * unfortunately.
 			 */
-			if (!skb_queue_empty(&tp->ucopy.prequeue))
+			if (skb_queue_len(&tp->ucopy.prequeue))
 				goto do_prequeue;
 
 			/* __ Set realtime policy in scheduler __ */
@@ -1394,7 +1394,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			}
 
 			if (tp->rcv_nxt == tp->copied_seq &&
-			    !skb_queue_empty(&tp->ucopy.prequeue)) {
+			    skb_queue_len(&tp->ucopy.prequeue)) {
 do_prequeue:
 				tcp_prequeue_process(sk);
 
@@ -1476,7 +1476,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	} while (len > 0);
 
 	if (user_recv) {
-		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
+		if (skb_queue_len(&tp->ucopy.prequeue)) {
 			int chunk;
 
 			tp->ucopy.len = copied > 0 ? len : 0;
diff --git a/trunk/net/ipv4/tcp_input.c b/trunk/net/ipv4/tcp_input.c
index 53a8a5399f1e..8de2f1071c2b 100644
--- a/trunk/net/ipv4/tcp_input.c
+++ b/trunk/net/ipv4/tcp_input.c
@@ -2802,7 +2802,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 	int this_sack;
 
 	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
-	if (skb_queue_empty(&tp->out_of_order_queue)) {
+	if (skb_queue_len(&tp->out_of_order_queue) == 0) {
 		tp->rx_opt.num_sacks = 0;
 		tp->rx_opt.eff_sacks = tp->rx_opt.dsack;
 		return;
@@ -2935,13 +2935,13 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 		if(th->fin)
 			tcp_fin(skb, sk, th);
 
-		if (!skb_queue_empty(&tp->out_of_order_queue)) {
+		if (skb_queue_len(&tp->out_of_order_queue)) {
 			tcp_ofo_queue(sk);
 
 			/* RFC2581. 4.2. SHOULD send immediate ACK, when
 			 * gap in queue is filled.
 			 */
-			if (skb_queue_empty(&tp->out_of_order_queue))
+			if (!skb_queue_len(&tp->out_of_order_queue))
 				tp->ack.pingpong = 0;
 		}
 
@@ -3249,8 +3249,9 @@ static int tcp_prune_queue(struct sock *sk)
 	 * This must not ever occur. */
 
 	/* First, purge the out_of_order queue. */
-	if (!skb_queue_empty(&tp->out_of_order_queue)) {
-		NET_INC_STATS_BH(LINUX_MIB_OFOPRUNED);
+	if (skb_queue_len(&tp->out_of_order_queue)) {
+		NET_ADD_STATS_BH(LINUX_MIB_OFOPRUNED,
+				 skb_queue_len(&tp->out_of_order_queue));
 		__skb_queue_purge(&tp->out_of_order_queue);
 
 		/* Reset SACK state.  A conforming SACK implementation will
diff --git a/trunk/net/ipv4/tcp_output.c b/trunk/net/ipv4/tcp_output.c
index e3f8ea1bfa9c..e041d057ec86 100644
--- a/trunk/net/ipv4/tcp_output.c
+++ b/trunk/net/ipv4/tcp_output.c
@@ -1613,7 +1613,7 @@ void tcp_send_fin(struct sock *sk)
 * was unread data in the receive queue.  This behavior is recommended
 * by draft-ietf-tcpimpl-prob-03.txt section 3.10.  -DaveM
 */
-void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority)
+void tcp_send_active_reset(struct sock *sk, int priority)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
diff --git a/trunk/net/ipv4/tcp_timer.c b/trunk/net/ipv4/tcp_timer.c
index 0084227438c2..b127b4498565 100644
--- a/trunk/net/ipv4/tcp_timer.c
+++ b/trunk/net/ipv4/tcp_timer.c
@@ -231,10 +231,11 @@ static void tcp_delack_timer(unsigned long data)
 	}
 	tp->ack.pending &= ~TCP_ACK_TIMER;
 
-	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
+	if (skb_queue_len(&tp->ucopy.prequeue)) {
 		struct sk_buff *skb;
 
-		NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED);
+		NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED,
+				 skb_queue_len(&tp->ucopy.prequeue));
 
 		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 			sk->sk_backlog_rcv(sk, skb);
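[Review note: the TCP hunks above switch NET_INC_STATS_*() to NET_ADD_STATS_*() with skb_queue_len() as the delta, so the MIB counters charge one unit per queued packet rather than one per event. Sketch of the equivalence, assuming the standard SNMP stat macros:]

	/* Illustration only: the two statements below are equivalent. */
	NET_INC_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED);
	NET_ADD_STATS_BH(LINUX_MIB_TCPSCHEDULERFAILED, 1);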
(pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) { if (pmc->ifindex != gsf->gf_interface) @@ -521,10 +511,8 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) if (ipv6_addr_equal(&pmc->addr, group)) break; } - if (!pmc) { /* must have a prior join */ - err = -EINVAL; + if (!pmc) /* must have a prior join */ goto done; - } if (gsf->gf_numsrc) { newpsl = (struct ip6_sf_socklist *)sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc), GFP_ATOMIC); @@ -556,13 +544,10 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) (void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0); pmc->sflist = newpsl; pmc->sfmode = gsf->gf_fmode; - err = 0; done: read_unlock_bh(&idev->lock); in6_dev_put(idev); dev_put(dev); - if (leavegroup) - err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group); return err; } diff --git a/trunk/net/irda/irlap.c b/trunk/net/irda/irlap.c index 7029618f5719..046ad0750e48 100644 --- a/trunk/net/irda/irlap.c +++ b/trunk/net/irda/irlap.c @@ -445,8 +445,9 @@ void irlap_disconnect_request(struct irlap_cb *self) IRDA_ASSERT(self->magic == LAP_MAGIC, return;); /* Don't disconnect until all data frames are successfully sent */ - if (!skb_queue_empty(&self->txq)) { + if (skb_queue_len(&self->txq) > 0) { self->disconnect_pending = TRUE; + return; } diff --git a/trunk/net/irda/irlap_event.c b/trunk/net/irda/irlap_event.c index a505b5457608..1cd89f5f3b75 100644 --- a/trunk/net/irda/irlap_event.c +++ b/trunk/net/irda/irlap_event.c @@ -191,7 +191,7 @@ static void irlap_start_poll_timer(struct irlap_cb *self, int timeout) * Send out the RR frames faster if our own transmit queue is empty, or * if the peer is busy. The effect is a much faster conversation */ - if (skb_queue_empty(&self->txq) || self->remote_busy) { + if ((skb_queue_len(&self->txq) == 0) || (self->remote_busy)) { if (self->fast_RR == TRUE) { /* * Assert that the fast poll timer has not reached the @@ -263,7 +263,7 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event, IRDA_DEBUG(2, "%s() : queue len = %d\n", __FUNCTION__, skb_queue_len(&self->txq)); - if (!skb_queue_empty(&self->txq)) { + if (skb_queue_len(&self->txq)) { /* Prevent race conditions with irlap_data_request() */ self->local_busy = TRUE; @@ -1074,7 +1074,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, #else /* CONFIG_IRDA_DYNAMIC_WINDOW */ /* Window has been adjusted for the max packet * size, so much simpler... - Jean II */ - nextfit = !skb_queue_empty(&self->txq); + nextfit = (skb_queue_len(&self->txq) > 0); #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ /* * Send data with poll bit cleared only if window > 1 @@ -1814,7 +1814,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event, #else /* CONFIG_IRDA_DYNAMIC_WINDOW */ /* Window has been adjusted for the max packet * size, so much simpler... - Jean II */ - nextfit = !skb_queue_empty(&self->txq); + nextfit = (skb_queue_len(&self->txq) > 0); #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ /* * Send data with final bit cleared only if window > 1 @@ -1937,7 +1937,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, irlap_data_indication(self, skb, FALSE); /* Any pending data requests? */ - if (!skb_queue_empty(&self->txq) && + if ((skb_queue_len(&self->txq) > 0) && (self->window > 0)) { self->ack_required = TRUE; @@ -2038,7 +2038,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, /* * Any pending data requests? 
*/ - if (!skb_queue_empty(&self->txq) && + if ((skb_queue_len(&self->txq) > 0) && (self->window > 0) && !self->remote_busy) { irlap_data_indication(self, skb, TRUE); @@ -2069,7 +2069,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, */ nr_status = irlap_validate_nr_received(self, info->nr); if (nr_status == NR_EXPECTED) { - if (!skb_queue_empty(&self->txq) && + if ((skb_queue_len( &self->txq) > 0) && (self->window > 0)) { self->remote_busy = FALSE; diff --git a/trunk/net/irda/irlap_frame.c b/trunk/net/irda/irlap_frame.c index 6dafbb43b529..040abe714aa3 100644 --- a/trunk/net/irda/irlap_frame.c +++ b/trunk/net/irda/irlap_frame.c @@ -1018,10 +1018,11 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command) /* * We can now fill the window with additional data frames */ - while (!skb_queue_empty(&self->txq)) { + while (skb_queue_len( &self->txq) > 0) { IRDA_DEBUG(0, "%s(), sending additional frames!\n", __FUNCTION__); - if (self->window > 0) { + if ((skb_queue_len( &self->txq) > 0) && + (self->window > 0)) { skb = skb_dequeue( &self->txq); IRDA_ASSERT(skb != NULL, return;); @@ -1030,7 +1031,8 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command) * bit cleared */ if ((self->window > 1) && - !skb_queue_empty(&self->txq)) { + skb_queue_len(&self->txq) > 0) + { irlap_send_data_primary(self, skb); } else { irlap_send_data_primary_poll(self, skb); diff --git a/trunk/net/irda/irttp.c b/trunk/net/irda/irttp.c index 6602d901f8b1..d091ccf773b3 100644 --- a/trunk/net/irda/irttp.c +++ b/trunk/net/irda/irttp.c @@ -1513,7 +1513,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata, /* * Check if there is still data segments in the transmit queue */ - if (!skb_queue_empty(&self->tx_queue)) { + if (skb_queue_len(&self->tx_queue) > 0) { if (priority == P_HIGH) { /* * No need to send the queued data, if we are diff --git a/trunk/net/llc/llc_c_ev.c b/trunk/net/llc/llc_c_ev.c index d5bdb53a348f..cd130c3b72bc 100644 --- a/trunk/net/llc/llc_c_ev.c +++ b/trunk/net/llc/llc_c_ev.c @@ -84,7 +84,7 @@ static u16 llc_util_nr_inside_tx_window(struct sock *sk, u8 nr) if (llc->dev->flags & IFF_LOOPBACK) goto out; rc = 1; - if (skb_queue_empty(&llc->pdu_unack_q)) + if (!skb_queue_len(&llc->pdu_unack_q)) goto out; skb = skb_peek(&llc->pdu_unack_q); pdu = llc_pdu_sn_hdr(skb); diff --git a/trunk/net/netlink/af_netlink.c b/trunk/net/netlink/af_netlink.c index 3405fdf41b93..fc456a7aaec3 100644 --- a/trunk/net/netlink/af_netlink.c +++ b/trunk/net/netlink/af_netlink.c @@ -858,7 +858,7 @@ static inline void netlink_rcv_wake(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); - if (skb_queue_empty(&sk->sk_receive_queue)) + if (!skb_queue_len(&sk->sk_receive_queue)) clear_bit(0, &nlk->state); if (!test_bit(0, &nlk->state)) wake_up_interruptible(&nlk->wait); diff --git a/trunk/net/sched/sch_red.c b/trunk/net/sched/sch_red.c index 7845d045eec4..664d0e47374f 100644 --- a/trunk/net/sched/sch_red.c +++ b/trunk/net/sched/sch_red.c @@ -385,7 +385,7 @@ static int red_change(struct Qdisc *sch, struct rtattr *opt) memcpy(q->Stab, RTA_DATA(tb[TCA_RED_STAB-1]), 256); q->qcount = -1; - if (skb_queue_empty(&sch->q)) + if (skb_queue_len(&sch->q) == 0) PSCHED_SET_PASTPERFECT(q->qidlestart); sch_tree_unlock(sch); return 0; diff --git a/trunk/net/sctp/associola.c b/trunk/net/sctp/associola.c index 4b47dd6f2485..7ae6aa772dab 100644 --- a/trunk/net/sctp/associola.c +++ b/trunk/net/sctp/associola.c @@ -203,7 +203,7 @@ static struct sctp_association 
*sctp_association_init(struct sctp_association *a */ asoc->addip_serial = asoc->c.initial_tsn; - INIT_LIST_HEAD(&asoc->addip_chunk_list); + skb_queue_head_init(&asoc->addip_chunks); /* Make an empty list of remote transport addresses. */ INIT_LIST_HEAD(&asoc->peer.transport_addr_list); diff --git a/trunk/net/sctp/input.c b/trunk/net/sctp/input.c index 5e085e041a6e..339f7acfdb64 100644 --- a/trunk/net/sctp/input.c +++ b/trunk/net/sctp/input.c @@ -115,17 +115,6 @@ static void sctp_rcv_set_owner_r(struct sk_buff *skb, struct sock *sk) atomic_add(sizeof(struct sctp_chunk),&sk->sk_rmem_alloc); } -struct sctp_input_cb { - union { - struct inet_skb_parm h4; -#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) - struct inet6_skb_parm h6; -#endif - } header; - struct sctp_chunk *chunk; -}; -#define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0])) - /* * This is the routine which IP calls when receiving an SCTP packet. */ @@ -254,7 +243,6 @@ int sctp_rcv(struct sk_buff *skb) ret = -ENOMEM; goto discard_release; } - SCTP_INPUT_CB(skb)->chunk = chunk; sctp_rcv_set_owner_r(skb,sk); @@ -277,9 +265,9 @@ int sctp_rcv(struct sk_buff *skb) sctp_bh_lock_sock(sk); if (sock_owned_by_user(sk)) - sk_add_backlog(sk, skb); + sk_add_backlog(sk, (struct sk_buff *) chunk); else - sctp_backlog_rcv(sk, skb); + sctp_backlog_rcv(sk, (struct sk_buff *) chunk); /* Release the sock and any reference counts we took in the * lookup calls. @@ -314,8 +302,14 @@ int sctp_rcv(struct sk_buff *skb) */ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) { - struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; - struct sctp_inq *inqueue = &chunk->rcvr->inqueue; + struct sctp_chunk *chunk; + struct sctp_inq *inqueue; + + /* One day chunk will live inside the skb, but for + * now this works. + */ + chunk = (struct sctp_chunk *) skb; + inqueue = &chunk->rcvr->inqueue; sctp_inq_push(inqueue, chunk); return 0; diff --git a/trunk/net/sctp/inqueue.c b/trunk/net/sctp/inqueue.c index 2d33922c044b..cedf4351556c 100644 --- a/trunk/net/sctp/inqueue.c +++ b/trunk/net/sctp/inqueue.c @@ -50,7 +50,7 @@ /* Initialize an SCTP inqueue. */ void sctp_inq_init(struct sctp_inq *queue) { - INIT_LIST_HEAD(&queue->in_chunk_list); + skb_queue_head_init(&queue->in); queue->in_progress = NULL; /* Create a task for delivering data. */ @@ -62,13 +62,11 @@ void sctp_inq_init(struct sctp_inq *queue) /* Release the memory associated with an SCTP inqueue. */ void sctp_inq_free(struct sctp_inq *queue) { - struct sctp_chunk *chunk, *tmp; + struct sctp_chunk *chunk; /* Empty the queue. */ - list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) { - list_del_init(&chunk->list); + while ((chunk = (struct sctp_chunk *) skb_dequeue(&queue->in)) != NULL) sctp_chunk_free(chunk); - } /* If there is a packet which is currently being worked on, * free it as well. @@ -94,7 +92,7 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *packet) * Eventually, we should clean up inqueue to not rely * on the BH related data structures. */ - list_add_tail(&packet->list, &q->in_chunk_list); + skb_queue_tail(&(q->in), (struct sk_buff *) packet); q->immediate.func(q->immediate.data); } @@ -133,16 +131,12 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) /* Do we need to take the next packet out of the queue to process? */ if (!chunk) { - struct list_head *entry; - /* Is the queue empty? 
diff --git a/trunk/net/sctp/output.c b/trunk/net/sctp/output.c
index 931371633464..84b5b370b09d 100644
--- a/trunk/net/sctp/output.c
+++ b/trunk/net/sctp/output.c
@@ -108,7 +108,7 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
 	packet->transport = transport;
 	packet->source_port = sport;
 	packet->destination_port = dport;
-	INIT_LIST_HEAD(&packet->chunk_list);
+	skb_queue_head_init(&packet->chunks);
 	if (asoc) {
 		struct sctp_sock *sp = sctp_sk(asoc->base.sk);
 		overhead = sp->pf->af->net_header_len;
@@ -129,14 +129,12 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
 /* Free a packet.  */
 void sctp_packet_free(struct sctp_packet *packet)
 {
-	struct sctp_chunk *chunk, *tmp;
+	struct sctp_chunk *chunk;
 
 	SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet);
 
-	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
-		list_del_init(&chunk->list);
+	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL)
 		sctp_chunk_free(chunk);
-	}
 
 	if (packet->malloced)
 		kfree(packet);
@@ -278,7 +276,7 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
 		packet->has_sack = 1;
 
 	/* It is OK to send this chunk.  */
-	list_add_tail(&chunk->list, &packet->chunk_list);
+	__skb_queue_tail(&packet->chunks, (struct sk_buff *)chunk);
 	packet->size += chunk_len;
 	chunk->transport = packet->transport;
 finish:
@@ -297,7 +295,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	struct sctphdr *sh;
 	__u32 crc32;
 	struct sk_buff *nskb;
-	struct sctp_chunk *chunk, *tmp;
+	struct sctp_chunk *chunk;
 	struct sock *sk;
 	int err = 0;
 	int padding;		/* How much padding do we need?  */
@@ -307,11 +305,11 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet);
 
 	/* Do NOT generate a chunkless packet. */
-	if (list_empty(&packet->chunk_list))
+	chunk = (struct sctp_chunk *)skb_peek(&packet->chunks);
+	if (unlikely(!chunk))
 		return err;
 
 	/* Set up convenience variables... */
-	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
 	sk = chunk->skb->sk;
 
 	/* Allocate the new skb.  */
@@ -372,8 +370,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	 * [This whole comment explains WORD_ROUND() below.]
 	 */
 	SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n");
-	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
-		list_del_init(&chunk->list);
+	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) {
 		if (sctp_chunk_is_data(chunk)) {
 
 			if (!chunk->has_tsn) {
@@ -514,8 +511,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	 * will get resent or dropped later.
 	 */
-	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
-		list_del_init(&chunk->list);
+	while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) {
 		if (!sctp_chunk_is_data(chunk))
 			sctp_chunk_free(chunk);
 	}
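Note the mix of locked and unlocked queue operations in these conversions: sctp_inq_push() uses skb_queue_tail(), which takes the queue's spinlock, while the packet and outqueue paths use the __-prefixed forms, which assume the caller already serializes access. A hedged sketch of that relationship, reusing the sk_buff_head layout sketched earlier (kernel context assumed; not the verbatim implementation):

/* For this sketch a buffer only needs its link fields. */
struct sk_buff {
	struct sk_buff *next;
	struct sk_buff *prev;
	/* ... payload fields elided; the era's code also kept a
	 * back-pointer skb->list to the owning sk_buff_head ...
	 */
};

/* Unlocked primitive: splice the buffer in at the tail and bump qlen.
 * The caller must guarantee exclusive access to the queue.
 */
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	struct sk_buff *prev = list->prev;

	newsk->next = (struct sk_buff *)list;
	newsk->prev = prev;
	prev->next = newsk;
	list->prev = newsk;
	list->qlen++;
}

/* Locked wrapper: the same primitive bracketed by the queue spinlock,
 * safe from process and interrupt context alike.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}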
diff --git a/trunk/net/sctp/outqueue.c b/trunk/net/sctp/outqueue.c
index efb72faba20c..4eb81a1407b7 100644
--- a/trunk/net/sctp/outqueue.c
+++ b/trunk/net/sctp/outqueue.c
@@ -75,7 +75,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
 static inline void sctp_outq_head_data(struct sctp_outq *q,
 					struct sctp_chunk *ch)
 {
-	list_add(&ch->list, &q->out_chunk_list);
+	__skb_queue_head(&q->out, (struct sk_buff *)ch);
 	q->out_qlen += ch->skb->len;
 	return;
 }
@@ -83,22 +83,17 @@ static inline void sctp_outq_head_data(struct sctp_outq *q,
 /* Take data from the front of the queue. */
 static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
 {
-	struct sctp_chunk *ch = NULL;
-
-	if (!list_empty(&q->out_chunk_list)) {
-		struct list_head *entry = q->out_chunk_list.next;
-
-		ch = list_entry(entry, struct sctp_chunk, list);
-		list_del_init(entry);
+	struct sctp_chunk *ch;
+	ch = (struct sctp_chunk *)__skb_dequeue(&q->out);
+	if (ch)
 		q->out_qlen -= ch->skb->len;
-	}
 	return ch;
 }
 
 /* Add data chunk to the end of the queue. */
 static inline void sctp_outq_tail_data(struct sctp_outq *q,
 					struct sctp_chunk *ch)
 {
-	list_add_tail(&ch->list, &q->out_chunk_list);
+	__skb_queue_tail(&q->out, (struct sk_buff *)ch);
 	q->out_qlen += ch->skb->len;
 	return;
 }
@@ -202,8 +197,8 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary,
 void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
 {
 	q->asoc = asoc;
-	INIT_LIST_HEAD(&q->out_chunk_list);
-	INIT_LIST_HEAD(&q->control_chunk_list);
+	skb_queue_head_init(&q->out);
+	skb_queue_head_init(&q->control);
 	INIT_LIST_HEAD(&q->retransmit);
 	INIT_LIST_HEAD(&q->sacked);
 	INIT_LIST_HEAD(&q->abandoned);
@@ -222,7 +217,7 @@ void sctp_outq_teardown(struct sctp_outq *q)
 {
 	struct sctp_transport *transport;
 	struct list_head *lchunk, *pos, *temp;
-	struct sctp_chunk *chunk, *tmp;
+	struct sctp_chunk *chunk;
 
 	/* Throw away unacknowledged chunks. */
 	list_for_each(pos, &q->asoc->peer.transport_addr_list) {
@@ -274,10 +269,8 @@ void sctp_outq_teardown(struct sctp_outq *q)
 	q->error = 0;
 
 	/* Throw away any leftover control chunks. */
-	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
-		list_del_init(&chunk->list);
+	while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)) != NULL)
 		sctp_chunk_free(chunk);
-	}
 }
 
 /* Free the outqueue structure and any related pending chunks. */
@@ -340,7 +333,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
 			break;
 		};
 	} else {
-		list_add_tail(&chunk->list, &q->control_chunk_list);
+		__skb_queue_tail(&q->control, (struct sk_buff *) chunk);
 		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
 	}
 
@@ -657,9 +650,10 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 	__u16 sport = asoc->base.bind_addr.port;
 	__u16 dport = asoc->peer.port;
 	__u32 vtag = asoc->peer.i.init_tag;
+	struct sk_buff_head *queue;
 	struct sctp_transport *transport = NULL;
 	struct sctp_transport *new_transport;
-	struct sctp_chunk *chunk, *tmp;
+	struct sctp_chunk *chunk;
 	sctp_xmit_t status;
 	int error = 0;
 	int start_timer = 0;
@@ -681,9 +675,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 	 *   ...
 	 */
-	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
-		list_del_init(&chunk->list);
-
+	queue = &q->control;
+	while ((chunk = (struct sctp_chunk *)skb_dequeue(queue)) != NULL) {
 		/* Pick the right transport to use. */
 		new_transport = chunk->transport;
@@ -821,6 +814,8 @@ int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 	/* Finally, transmit new packets.  */
 	start_timer = 0;
+	queue = &q->out;
+
 	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
 		/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
 		 * stream identifier.
@@ -1154,9 +1149,8 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
 	/* See if all chunks are acked.
 	 * Make sure the empty queue handler will get run later.
 	 */
-	q->empty = (list_empty(&q->out_chunk_list) &&
-		    list_empty(&q->control_chunk_list) &&
-		    list_empty(&q->retransmit));
+	q->empty = skb_queue_empty(&q->out) && skb_queue_empty(&q->control) &&
+			list_empty(&q->retransmit);
 	if (!q->empty)
 		goto finish;
@@ -1685,9 +1679,9 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
 		if (TSN_lte(tsn, ctsn)) {
 			list_del_init(lchunk);
 			if (!chunk->tsn_gap_acked) {
-				chunk->transport->flight_size -=
-						sctp_data_size(chunk);
-				q->outstanding_bytes -= sctp_data_size(chunk);
+				chunk->transport->flight_size -=
+						sctp_data_size(chunk);
+				q->outstanding_bytes -= sctp_data_size(chunk);
 			}
 			sctp_chunk_free(chunk);
 		} else {
@@ -1735,7 +1729,7 @@ static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
 					    nskips, &ftsn_skip_arr[0]);
 
 	if (ftsn_chunk) {
-		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
+		__skb_queue_tail(&q->control, (struct sk_buff *)ftsn_chunk);
 		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
 	}
 }
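Taken together, the outqueue.c hunks convert only the two chunk queues; the retransmit, sacked and abandoned lists stay as list_heads, which is why the q->empty computation above mixes skb_queue_empty() and list_empty(). A consolidated sketch of the converted structure, keeping only the members these hunks actually touch (field types assumed; the real struct has many more fields):

struct sctp_outq {
	struct sctp_association *asoc;

	/* Converted to sk_buff_heads: chunks ride on the sk_buff
	 * overlay at the front of struct sctp_chunk.
	 */
	struct sk_buff_head out;	/* pending DATA chunks */
	struct sk_buff_head control;	/* pending control chunks */

	/* Still ordinary list_heads, tested with list_empty(). */
	struct list_head retransmit;
	struct list_head sacked;
	struct list_head abandoned;

	unsigned int out_qlen;	/* bytes (not chunks) queued on 'out' */
	int error;
	char empty;		/* all queues drained and acked? */

	/* ... remaining fields elided ... */
};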
diff --git a/trunk/net/sctp/sm_make_chunk.c b/trunk/net/sctp/sm_make_chunk.c
index 773cd93fa3d0..5baed9bb7de5 100644
--- a/trunk/net/sctp/sm_make_chunk.c
+++ b/trunk/net/sctp/sm_make_chunk.c
@@ -1003,7 +1003,6 @@ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb,
 		SCTP_DEBUG_PRINTK("chunkifying skb %p w/o an sk\n", skb);
 	}
 
-	INIT_LIST_HEAD(&retval->list);
 	retval->skb		= skb;
 	retval->asoc		= (struct sctp_association *)asoc;
 	retval->resent  	= 0;
@@ -1117,7 +1116,8 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk)
 /* Possibly, free the chunk.  */
 void sctp_chunk_free(struct sctp_chunk *chunk)
 {
-	BUG_ON(!list_empty(&chunk->list));
+	/* Make sure that we are not on any list.  */
+	skb_unlink((struct sk_buff *) chunk);
 	list_del_init(&chunk->transmitted_list);
 
 	/* Release our reference on the message tracker.  */
@@ -2739,12 +2739,8 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
 	asoc->addip_last_asconf = NULL;
 
 	/* Send the next asconf chunk from the addip chunk queue.  */
-	if (!list_empty(&asoc->addip_chunk_list)) {
-		struct list_head *entry = asoc->addip_chunk_list.next;
-		asconf = list_entry(entry, struct sctp_chunk, list);
-
-		list_del_init(entry);
-
+	asconf = (struct sctp_chunk *)__skb_dequeue(&asoc->addip_chunks);
+	if (asconf) {
 		/* Hold the chunk until an ASCONF_ACK is received.  */
 		sctp_chunk_hold(asconf);
 		if (sctp_primitive_ASCONF(asoc, asconf))
diff --git a/trunk/net/sctp/socket.c b/trunk/net/sctp/socket.c
index 091a66f06a35..aad55dc3792b 100644
--- a/trunk/net/sctp/socket.c
+++ b/trunk/net/sctp/socket.c
@@ -406,7 +406,7 @@ static int sctp_send_asconf(struct sctp_association *asoc,
 	 * transmission.
 	 */
 	if (asoc->addip_last_asconf) {
-		list_add_tail(&chunk->list, &asoc->addip_chunk_list);
+		__skb_queue_tail(&asoc->addip_chunks, (struct sk_buff *)chunk);
 		goto out;
 	}
diff --git a/trunk/net/unix/af_unix.c b/trunk/net/unix/af_unix.c
index d403e34088ad..c420eba4876b 100644
--- a/trunk/net/unix/af_unix.c
+++ b/trunk/net/unix/af_unix.c
@@ -302,7 +302,7 @@ static void unix_write_space(struct sock *sk)
  *	may receive messages only from that peer. */
 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
 {
-	if (!skb_queue_empty(&sk->sk_receive_queue)) {
+	if (skb_queue_len(&sk->sk_receive_queue)) {
 		skb_queue_purge(&sk->sk_receive_queue);
 		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
 
@@ -1619,7 +1619,7 @@ static long unix_stream_data_wait(struct sock * sk, long timeo)
 	for (;;) {
 		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 
-		if (!skb_queue_empty(&sk->sk_receive_queue) ||
+		if (skb_queue_len(&sk->sk_receive_queue) ||
 		    sk->sk_err ||
 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
 		    signal_pending(current) ||