From e4283f82796138953f5871d72db5ecb97dadde91 Mon Sep 17 00:00:00 2001
From: Lennert Buytenhek
Date: Sun, 24 Aug 2008 02:45:32 +0200
Subject: [PATCH]

--- yaml ---
r: 109239
b: refs/heads/master
c: 819ddcafb33136f2ba18018a22edcf857f640528
h: refs/heads/master
i:
  109237: 5f2bb09b40f2570fe131d91b50a080da70977678
  109235: 7dffe58b29f88f72ed61c8f2b1525087fd0305e1
  109231: 99a54c9d548af96bc8f849a48d92a71f3a5307d9
v: v3
---
 [refs]                           |    2 +-
 trunk/drivers/net/bnx2x.h        |    2 +-
 trunk/drivers/net/bnx2x_main.c   |  220 +++++++++++++++----------------
 trunk/drivers/net/mv643xx_eth.c  |    3 +-
 trunk/include/net/pkt_sched.h    |    1 -
 trunk/include/net/sch_generic.h  |   17 +--
 trunk/net/ipv4/icmp.c            |   22 ++--
 trunk/net/ipv4/route.c           |   24 +---
 trunk/net/ipv4/tcp_output.c      |    6 +-
 trunk/net/ipv6/addrconf.c        |    1 -
 trunk/net/ipv6/icmp.c            |   23 ++--
 trunk/net/ipv6/sysctl_net_ipv6.c |    2 +-
 trunk/net/sched/sch_api.c        |   66 ++--------
 trunk/net/sched/sch_cbq.c        |    8 +-
 trunk/net/sched/sch_generic.c    |    9 +-
 trunk/net/sched/sch_hfsc.c       |    4 +-
 trunk/net/sched/sch_htb.c        |    4 +-
 trunk/net/sctp/auth.c            |    4 -
 trunk/net/sctp/endpointola.c     |    4 +-
 trunk/net/sctp/socket.c          |   90 +++----------
 20 files changed, 181 insertions(+), 331 deletions(-)

diff --git a/[refs] b/[refs]
index 36d22b2c9d90..09be3896d49b 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: d994af0d50efc96b2077978fe9f066992639d525
+refs/heads/master: 819ddcafb33136f2ba18018a22edcf857f640528
diff --git a/trunk/drivers/net/bnx2x.h b/trunk/drivers/net/bnx2x.h
index a14dba1afcc5..b468f904c7f8 100644
--- a/trunk/drivers/net/bnx2x.h
+++ b/trunk/drivers/net/bnx2x.h
@@ -271,7 +271,7 @@ struct bnx2x_fastpath {
 					 (fp->tx_pkt_prod != fp->tx_pkt_cons))
 
 #define BNX2X_HAS_RX_WORK(fp) \
-			(fp->rx_comp_cons != rx_cons_sb)
+			(fp->rx_comp_cons != le16_to_cpu(*fp->rx_cons_sb))
 
 #define BNX2X_HAS_WORK(fp)	(BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
 
diff --git a/trunk/drivers/net/bnx2x_main.c b/trunk/drivers/net/bnx2x_main.c
index 82deea0a63f5..971576b43687 100644
--- a/trunk/drivers/net/bnx2x_main.c
+++ b/trunk/drivers/net/bnx2x_main.c
@@ -59,8 +59,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION	"1.45.20"
-#define DRV_MODULE_RELDATE	"2008/08/25"
+#define DRV_MODULE_VERSION	"1.45.17"
+#define DRV_MODULE_RELDATE	"2008/08/13"
 #define BNX2X_BC_VER		0x040200
 
 /* Time in jiffies before concluding the transmitter is hung */
@@ -1717,8 +1717,8 @@ static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 		return -EEXIST;
 	}
 
-	/* Try for 5 second every 5ms */
-	for (cnt = 0; cnt < 1000; cnt++) {
+	/* Try for 1 second every 5ms */
+	for (cnt = 0; cnt < 200; cnt++) {
 		/* Try to acquire the lock */
 		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
 		lock_status = REG_RD(bp, hw_lock_control_reg);
@@ -2550,7 +2550,6 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 
 		BNX2X_ERR("SPIO5 hw attention\n");
 
 		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
-		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 			/* Fan failure attention */
@@ -4606,17 +4605,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
 {
 	int i;
 
-	if (bp->flags & TPA_ENABLE_FLAG) {
-		struct tstorm_eth_tpa_exist tpa = {0};
-
-		tpa.tpa_exist = 1;
-
-		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
-		       ((u32 *)&tpa)[0]);
-		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
-		       ((u32 *)&tpa)[1]);
-	}
-
 	/* Zero this manually as its initialization is
 	   currently missing in the initTool */
 	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -5349,7 +5337,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
 	}
 
 	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
-	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* Fan failure is indicated by SPIO 5 */
 		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
@@ -5376,6 +5363,17 @@ static int bnx2x_init_common(struct bnx2x *bp)
 
 	enable_blocks_attention(bp);
 
+	if (bp->flags & TPA_ENABLE_FLAG) {
+		struct tstorm_eth_tpa_exist tmp = {0};
+
+		tmp.tpa_exist = 1;
+
+		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
+		       ((u32 *)&tmp)[0]);
+		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
+		       ((u32 *)&tmp)[1]);
+	}
+
 	if (!BP_NOMCP(bp)) {
 		bnx2x_acquire_phy_lock(bp);
 		bnx2x_common_init_phy(bp, bp->common.shmem_base);
@@ -5533,7 +5531,6 @@ static int bnx2x_init_port(struct bnx2x *bp)
 	/* Port DMAE comes here */
 
 	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
-	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* add SPIO 5 to group 0 */
 		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -6058,44 +6055,6 @@ static int bnx2x_req_irq(struct bnx2x *bp)
 	return rc;
 }
 
-static void bnx2x_napi_enable(struct bnx2x *bp)
-{
-	int i;
-
-	for_each_queue(bp, i)
-		napi_enable(&bnx2x_fp(bp, i, napi));
-}
-
-static void bnx2x_napi_disable(struct bnx2x *bp)
-{
-	int i;
-
-	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
-}
-
-static void bnx2x_netif_start(struct bnx2x *bp)
-{
-	if (atomic_dec_and_test(&bp->intr_sem)) {
-		if (netif_running(bp->dev)) {
-			if (bp->state == BNX2X_STATE_OPEN)
-				netif_wake_queue(bp->dev);
-			bnx2x_napi_enable(bp);
-			bnx2x_int_enable(bp);
-		}
-	}
-}
-
-static void bnx2x_netif_stop(struct bnx2x *bp)
-{
-	bnx2x_int_disable_sync(bp);
-	if (netif_running(bp->dev)) {
-		bnx2x_napi_disable(bp);
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-	}
-}
-
 /*
  * Init service functions
  */
@@ -6379,7 +6338,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	rc = bnx2x_init_hw(bp, load_code);
 	if (rc) {
 		BNX2X_ERR("HW init failed, aborting\n");
-		goto load_int_disable;
+		goto load_error;
 	}
 
 	/* Setup NIC internals and enable interrupts */
@@ -6391,7 +6350,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		if (!load_code) {
 			BNX2X_ERR("MCP response failure, aborting\n");
 			rc = -EBUSY;
-			goto load_rings_free;
+			goto load_int_disable;
 		}
 	}
 
@@ -6401,7 +6360,8 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	/* Enable Rx interrupt handling before sending the ramrod
 	   as it's completed on Rx FP queue */
-	bnx2x_napi_enable(bp);
+	for_each_queue(bp, i)
+		napi_enable(&bnx2x_fp(bp, i, napi));
 
 	/* Enable interrupt handling */
 	atomic_set(&bp->intr_sem, 0);
@@ -6409,7 +6369,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	rc = bnx2x_setup_leading(bp);
 	if (rc) {
 		BNX2X_ERR("Setup leading failed!\n");
-		goto load_netif_stop;
+		goto load_stop_netif;
 	}
 
 	if (CHIP_IS_E1H(bp))
@@ -6422,7 +6382,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		for_each_nondefault_queue(bp, i) {
 			rc = bnx2x_setup_multi(bp, i);
 			if (rc)
-				goto load_netif_stop;
+				goto load_stop_netif;
 		}
 
 		if (CHIP_IS_E1(bp))
@@ -6467,17 +6427,20 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	return 0;
 
-load_netif_stop:
-	bnx2x_napi_disable(bp);
-load_rings_free:
-	/* Free SKBs, SGEs, TPA pool and driver internals */
-	bnx2x_free_skbs(bp);
+load_stop_netif:
 	for_each_queue(bp, i)
-		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+		napi_disable(&bnx2x_fp(bp, i, napi));
+
 load_int_disable:
 	bnx2x_int_disable_sync(bp);
+
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
+
+	/* Free SKBs, SGEs, TPA pool and driver internals */
+	bnx2x_free_skbs(bp);
+	for_each_queue(bp, i)
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 
 load_error:
 	bnx2x_free_mem(bp);
@@ -6492,7 +6455,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
 
 	/* halt the connection */
 	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
 
 	/* Wait for completion */
 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
@@ -6650,9 +6613,11 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 	bnx2x_set_storm_rx_mode(bp);
 
-	bnx2x_netif_stop(bp);
-	if (!netif_running(bp->dev))
-		bnx2x_napi_disable(bp);
+	if (netif_running(bp->dev)) {
+		netif_tx_disable(bp->dev);
+		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
+	}
+
 	del_timer_sync(&bp->timer);
 	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
 		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
@@ -6666,7 +6631,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 		smp_rmb();
 		while (BNX2X_HAS_TX_WORK(fp)) {
 
-			bnx2x_tx_int(fp, 1000);
+			if (!netif_running(bp->dev))
+				bnx2x_tx_int(fp, 1000);
+
 			if (!cnt) {
 				BNX2X_ERR("timeout waiting for queue[%d]\n",
 					  i);
@@ -6682,12 +6649,46 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 			smp_rmb();
 		}
 	}
+
 	/* Give HW time to discard old tx messages */
 	msleep(1);
 
+	for_each_queue(bp, i)
+		napi_disable(&bnx2x_fp(bp, i, napi));
+	/* Disable interrupts after Tx and Rx are disabled on stack level */
+	bnx2x_int_disable_sync(bp);
+
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 
+	if (unload_mode == UNLOAD_NORMAL)
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+	else if (bp->flags & NO_WOL_FLAG) {
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+		if (CHIP_IS_E1H(bp))
+			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
+
+	} else if (bp->wol) {
+		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+		u8 *mac_addr = bp->dev->dev_addr;
+		u32 val;
+		/* The mac address is written to entries 1-4 to
+		   preserve entry 0 which is used by the PMF */
+		u8 entry = (BP_E1HVN(bp) + 1)*8;
+
+		val = (mac_addr[0] << 8) | mac_addr[1];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
+
+		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+		      (mac_addr[4] << 8) | mac_addr[5];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+
+	} else
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
 	if (CHIP_IS_E1(bp)) {
 		struct mac_configuration_cmd *config =
 						bnx2x_sp(bp, mcast_config);
@@ -6710,41 +6711,14 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 			  U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
 
 	} else { /* E1H */
-		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
-
 		bnx2x_set_mac_addr_e1h(bp, 0);
 
 		for (i = 0; i < MC_HASH_SIZE; i++)
 			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
 	}
 
-	if (unload_mode == UNLOAD_NORMAL)
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
-	else if (bp->flags & NO_WOL_FLAG) {
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
-		if (CHIP_IS_E1H(bp))
-			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
-
-	} else if (bp->wol) {
-		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-		u8 *mac_addr = bp->dev->dev_addr;
-		u32 val;
-		/* The mac address is written to entries 1-4 to
-		   preserve entry 0 which is used by the PMF */
-		u8 entry = (BP_E1HVN(bp) + 1)*8;
-
-		val = (mac_addr[0] << 8) | mac_addr[1];
-		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
-
-		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
-		      (mac_addr[4] << 8) | mac_addr[5];
-		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
-
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
-
-	} else
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+	if (CHIP_IS_E1H(bp))
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
 
 	/* Close multi and leading connections
 	   Completions for ramrods are collected in a synchronous way */
@@ -6847,10 +6821,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 		 */
 		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
-		if (val == 0x7)
-			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
-		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
-
 		if (val == 0x7) {
 			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 			/* save our func */
@@ -6928,6 +6898,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 				(SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
 				 DRV_MSG_SEQ_NUMBER_MASK);
 		}
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 	}
 }
 
@@ -8646,6 +8617,34 @@ static int bnx2x_test_memory(struct bnx2x *bp)
 	return rc;
 }
 
+static void bnx2x_netif_start(struct bnx2x *bp)
+{
+	int i;
+
+	if (atomic_dec_and_test(&bp->intr_sem)) {
+		if (netif_running(bp->dev)) {
+			bnx2x_int_enable(bp);
+			for_each_queue(bp, i)
+				napi_enable(&bnx2x_fp(bp, i, napi));
+			if (bp->state == BNX2X_STATE_OPEN)
+				netif_wake_queue(bp->dev);
+		}
+	}
+}
+
+static void bnx2x_netif_stop(struct bnx2x *bp)
+{
+	int i;
+
+	if (netif_running(bp->dev)) {
+		netif_tx_disable(bp->dev);
+		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
+		for_each_queue(bp, i)
+			napi_disable(&bnx2x_fp(bp, i, napi));
+	}
+	bnx2x_int_disable_sync(bp);
+}
+
 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
 {
 	int cnt = 1000;
@@ -9251,7 +9250,6 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 						 napi);
 	struct bnx2x *bp = fp->bp;
 	int work_done = 0;
-	u16 rx_cons_sb;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -9267,16 +9265,10 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 	if (BNX2X_HAS_TX_WORK(fp))
 		bnx2x_tx_int(fp, budget);
 
-	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
-	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
-		rx_cons_sb++;
 	if (BNX2X_HAS_RX_WORK(fp))
 		work_done = bnx2x_rx_int(fp, budget);
 
 	rmb(); /* BNX2X_HAS_WORK() reads the status block */
-	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
-	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
-		rx_cons_sb++;
 
 	/* must not complete if we consumed full budget */
 	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
@@ -9492,7 +9484,8 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	fp_index = (smp_processor_id() % bp->num_queues);
 	fp = &bp->fp[fp_index];
 
-	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
+	if (unlikely(bnx2x_tx_avail(bp->fp) <
+					(skb_shinfo(skb)->nr_frags + 3))) {
 		bp->eth_stats.driver_xoff++,
 		netif_stop_queue(dev);
 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
@@ -9555,6 +9548,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_bd->vlan = cpu_to_le16(pkt_prod);
 
 	if (xmit_type) {
+
 		/* turn on parsing and get a BD */
 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 		pbd = (void *)&fp->tx_desc_ring[bd_prod];
diff --git a/trunk/drivers/net/mv643xx_eth.c b/trunk/drivers/net/mv643xx_eth.c
index 46819af3b062..af279f26c407 100644
--- a/trunk/drivers/net/mv643xx_eth.c
+++ b/trunk/drivers/net/mv643xx_eth.c
@@ -650,8 +650,6 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 
 	if (rx < budget) {
 		netif_rx_complete(mp->dev, napi);
-		wrl(mp, INT_CAUSE(mp->port_num), 0);
-		wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
 		wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
 	}
 
@@ -1796,6 +1794,7 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 	 */
 #ifdef MV643XX_ETH_NAPI
 	if (int_cause & INT_RX) {
+		wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_RX));
 		wrl(mp, INT_MASK(mp->port_num), 0x00000000);
 		rdl(mp, INT_MASK(mp->port_num));
 
diff --git a/trunk/include/net/pkt_sched.h b/trunk/include/net/pkt_sched.h
index b786a5b09253..853fe83d9f37 100644
--- a/trunk/include/net/pkt_sched.h
+++ b/trunk/include/net/pkt_sched.h
@@ -78,7 +78,6 @@ extern struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
 
 extern int register_qdisc(struct Qdisc_ops *qops);
 extern int unregister_qdisc(struct Qdisc_ops *qops);
-extern void qdisc_list_del(struct Qdisc *q);
 extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
 extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
 extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
diff --git a/trunk/include/net/sch_generic.h b/trunk/include/net/sch_generic.h
index e5569625d2a5..84d25f2e6188 100644
--- a/trunk/include/net/sch_generic.h
+++ b/trunk/include/net/sch_generic.h
@@ -193,11 +193,6 @@ static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
 	return qdisc->dev_queue->qdisc;
 }
 
-static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
-{
-	return qdisc->dev_queue->qdisc_sleeping;
-}
-
 /* The qdisc root lock is a mechanism by which to top level
  * of a qdisc tree can be locked from any qdisc node in the
  * forest.  This allows changing the configuration of some
@@ -217,14 +212,6 @@ static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
 	return qdisc_lock(root);
 }
 
-static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
-{
-	struct Qdisc *root = qdisc_root_sleeping(qdisc);
-
-	ASSERT_RTNL();
-	return qdisc_lock(root);
-}
-
 static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
 {
 	return qdisc->dev_queue->dev;
@@ -232,12 +219,12 @@ static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
 
 static inline void sch_tree_lock(struct Qdisc *q)
 {
-	spin_lock_bh(qdisc_root_sleeping_lock(q));
+	spin_lock_bh(qdisc_root_lock(q));
 }
 
 static inline void sch_tree_unlock(struct Qdisc *q)
 {
-	spin_unlock_bh(qdisc_root_sleeping_lock(q));
+	spin_unlock_bh(qdisc_root_lock(q));
 }
 
 #define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
diff --git a/trunk/net/ipv4/icmp.c b/trunk/net/ipv4/icmp.c
index 55c355e63234..860558633b2c 100644
--- a/trunk/net/ipv4/icmp.c
+++ b/trunk/net/ipv4/icmp.c
@@ -204,22 +204,18 @@ static struct sock *icmp_sk(struct net *net)
 	return net->ipv4.icmp_sk[smp_processor_id()];
 }
 
-static inline struct sock *icmp_xmit_lock(struct net *net)
+static inline int icmp_xmit_lock(struct sock *sk)
 {
-	struct sock *sk;
-
 	local_bh_disable();
 
-	sk = icmp_sk(net);
-
 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 		/* This can happen if the output path signals a
 		 * dst_link_failure() for an outgoing ICMP packet.
 		 */
 		local_bh_enable();
-		return NULL;
+		return 1;
 	}
-	return sk;
+	return 0;
 }
 
 static inline void icmp_xmit_unlock(struct sock *sk)
@@ -358,17 +354,15 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	struct ipcm_cookie ipc;
 	struct rtable *rt = skb->rtable;
 	struct net *net = dev_net(rt->u.dst.dev);
-	struct sock *sk;
-	struct inet_sock *inet;
+	struct sock *sk = icmp_sk(net);
+	struct inet_sock *inet = inet_sk(sk);
 	__be32 daddr;
 
 	if (ip_options_echo(&icmp_param->replyopts, skb))
 		return;
 
-	sk = icmp_xmit_lock(net);
-	if (sk == NULL)
+	if (icmp_xmit_lock(sk))
 		return;
-	inet = inet_sk(sk);
 
 	icmp_param->data.icmph.checksum = 0;
 
@@ -425,6 +419,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	if (!rt)
 		goto out;
 	net = dev_net(rt->u.dst.dev);
+	sk = icmp_sk(net);
 
 	/*
 	 *	Find the original header.  It is expected to be valid, of course.
@@ -488,8 +483,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 		}
 	}
 
-	sk = icmp_xmit_lock(net);
-	if (sk == NULL)
+	if (icmp_xmit_lock(sk))
 		return;
 
 	/*
diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c
index 6ee5354c9aa1..cca921ea8550 100644
--- a/trunk/net/ipv4/route.c
+++ b/trunk/net/ipv4/route.c
@@ -3116,23 +3116,14 @@ static ctl_table ipv4_route_table[] = {
 	{ .ctl_name = 0 }
 };
 
-static struct ctl_table empty[1];
-
-static struct ctl_table ipv4_skeleton[] =
-{
-	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE,
-	  .mode = 0555, .child = ipv4_route_table},
-	{ .procname = "neigh", .ctl_name = NET_IPV4_NEIGH,
-	  .mode = 0555, .child = empty},
-	{ }
-};
-
-static __net_initdata struct ctl_path ipv4_path[] = {
+static __net_initdata struct ctl_path ipv4_route_path[] = {
 	{ .procname = "net", .ctl_name = CTL_NET, },
 	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
+	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
 	{ },
 };
 
+
 static struct ctl_table ipv4_route_flush_table[] = {
 	{
 		.ctl_name 	= NET_IPV4_ROUTE_FLUSH,
@@ -3145,13 +3136,6 @@ static struct ctl_table ipv4_route_flush_table[] = {
 	{ .ctl_name = 0 },
 };
 
-static __net_initdata struct ctl_path ipv4_route_path[] = {
-	{ .procname = "net", .ctl_name = CTL_NET, },
-	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
-	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
-	{ },
-};
-
 static __net_init int sysctl_route_net_init(struct net *net)
 {
 	struct ctl_table *tbl;
@@ -3303,7 +3287,7 @@ int __init ip_rt_init(void)
  */
 void __init ip_static_sysctl_init(void)
 {
-	register_sysctl_paths(ipv4_path, ipv4_skeleton);
+	register_sysctl_paths(ipv4_route_path, ipv4_route_table);
 }
 #endif
 
diff --git a/trunk/net/ipv4/tcp_output.c b/trunk/net/ipv4/tcp_output.c
index 8165f5aa8c71..a00532de2a8c 100644
--- a/trunk/net/ipv4/tcp_output.c
+++ b/trunk/net/ipv4/tcp_output.c
@@ -468,8 +468,7 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 	}
 	if (likely(sysctl_tcp_window_scaling)) {
 		opts->ws = tp->rx_opt.rcv_wscale;
-		if(likely(opts->ws))
-			size += TCPOLEN_WSCALE_ALIGNED;
+		size += TCPOLEN_WSCALE_ALIGNED;
 	}
 	if (likely(sysctl_tcp_sack)) {
 		opts->options |= OPTION_SACK_ADVERTISE;
@@ -510,8 +509,7 @@ static unsigned tcp_synack_options(struct sock *sk,
 
 	if (likely(ireq->wscale_ok)) {
 		opts->ws = ireq->rcv_wscale;
-		if(likely(opts->ws))
-			size += TCPOLEN_WSCALE_ALIGNED;
+		size += TCPOLEN_WSCALE_ALIGNED;
 	}
 	if (likely(doing_ts)) {
 		opts->options |= OPTION_TS;
diff --git a/trunk/net/ipv6/addrconf.c b/trunk/net/ipv6/addrconf.c
index 7b6a584b62dd..e2d3b7580b76 100644
--- a/trunk/net/ipv6/addrconf.c
+++ b/trunk/net/ipv6/addrconf.c
@@ -1688,7 +1688,6 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
 		.fc_dst_len = plen,
 		.fc_flags = RTF_UP | flags,
 		.fc_nlinfo.nl_net = dev_net(dev),
-		.fc_protocol = RTPROT_KERNEL,
 	};
 
 	ipv6_addr_copy(&cfg.fc_dst, pfx);
diff --git a/trunk/net/ipv6/icmp.c b/trunk/net/ipv6/icmp.c
index b3157a0cc15d..abedf95fdf2d 100644
--- a/trunk/net/ipv6/icmp.c
+++ b/trunk/net/ipv6/icmp.c
@@ -91,22 +91,19 @@ static struct inet6_protocol icmpv6_protocol = {
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
-static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
+static __inline__ int icmpv6_xmit_lock(struct sock *sk)
 {
-	struct sock *sk;
-
 	local_bh_disable();
 
-	sk = icmpv6_sk(net);
 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 		/* This can happen if the output path (f.e. SIT or
 		 * ip6ip6 tunnel) signals dst_link_failure() for an
 		 * outgoing ICMP6 packet.
 		 */
 		local_bh_enable();
-		return NULL;
+		return 1;
 	}
-	return sk;
+	return 0;
 }
 
 static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
@@ -395,11 +392,12 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
 	fl.fl_icmp_code = code;
 	security_skb_classify_flow(skb, &fl);
 
-	sk = icmpv6_xmit_lock(net);
-	if (sk == NULL)
-		return;
+	sk = icmpv6_sk(net);
 	np = inet6_sk(sk);
 
+	if (icmpv6_xmit_lock(sk))
+		return;
+
 	if (!icmpv6_xrlim_allow(sk, type, &fl))
 		goto out;
 
@@ -541,11 +539,12 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
 	security_skb_classify_flow(skb, &fl);
 
-	sk = icmpv6_xmit_lock(net);
-	if (sk == NULL)
-		return;
+	sk = icmpv6_sk(net);
 	np = inet6_sk(sk);
 
+	if (icmpv6_xmit_lock(sk))
+		return;
+
 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
 		fl.oif = np->mcast_oif;
 
diff --git a/trunk/net/ipv6/sysctl_net_ipv6.c b/trunk/net/ipv6/sysctl_net_ipv6.c
index 587f8f60c489..e6dfaeac6be3 100644
--- a/trunk/net/ipv6/sysctl_net_ipv6.c
+++ b/trunk/net/ipv6/sysctl_net_ipv6.c
@@ -156,7 +156,7 @@ static struct ctl_table_header *ip6_base;
 int ipv6_static_sysctl_register(void)
 {
 	static struct ctl_table empty[1];
-	ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty);
+	ip6_base = register_net_sysctl_rotable(net_ipv6_ctl_path, empty);
 	if (ip6_base == NULL)
 		return -ENOMEM;
 	return 0;
diff --git a/trunk/net/sched/sch_api.c b/trunk/net/sched/sch_api.c
index 506b709510b6..ef0efeca6352 100644
--- a/trunk/net/sched/sch_api.c
+++ b/trunk/net/sched/sch_api.c
@@ -199,53 +199,19 @@ struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 	return NULL;
 }
 
-/*
- * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen()
- * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue()
- */
-static DEFINE_SPINLOCK(qdisc_list_lock);
-
-static void qdisc_list_add(struct Qdisc *q)
-{
-	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
-		spin_lock_bh(&qdisc_list_lock);
-		list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
-		spin_unlock_bh(&qdisc_list_lock);
-	}
-}
-
-void qdisc_list_del(struct Qdisc *q)
-{
-	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
-		spin_lock_bh(&qdisc_list_lock);
-		list_del(&q->list);
-		spin_unlock_bh(&qdisc_list_lock);
-	}
-}
-EXPORT_SYMBOL(qdisc_list_del);
-
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
 	unsigned int i;
-	struct Qdisc *q;
-
-	spin_lock_bh(&qdisc_list_lock);
 
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		struct Qdisc *txq_root = txq->qdisc_sleeping;
+		struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
 
 		q = qdisc_match_from_root(txq_root, handle);
 		if (q)
-			goto unlock;
+			return q;
 	}
-
-	q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
-
-unlock:
-	spin_unlock_bh(&qdisc_list_lock);
-
-	return q;
+	return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
 }
 
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
@@ -478,10 +444,6 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
 {
 	ktime_t time;
 
-	if (test_bit(__QDISC_STATE_DEACTIVATED,
-		     &qdisc_root_sleeping(wd->qdisc)->state))
-		return;
-
 	wd->qdisc->flags |= TCQ_F_THROTTLED;
 	time = ktime_set(0, 0);
 	time = ktime_add_ns(time, PSCHED_US2NS(expires));
@@ -624,7 +586,7 @@ static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
 	spinlock_t *root_lock;
 
-	root_lock = qdisc_lock(oqdisc);
+	root_lock = qdisc_root_lock(oqdisc);
 	spin_lock_bh(root_lock);
 
 	/* Prune old scheduler */
@@ -635,7 +597,7 @@ static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 	if (qdisc == NULL)
 		qdisc = &noop_qdisc;
 	dev_queue->qdisc_sleeping = qdisc;
-	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
+	dev_queue->qdisc = &noop_qdisc;
 
 	spin_unlock_bh(root_lock);
 
@@ -830,16 +792,9 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 		sch->stab = stab;
 	}
 	if (tca[TCA_RATE]) {
-		spinlock_t *root_lock;
-
-		if ((sch->parent != TC_H_ROOT) &&
-		    !(sch->flags & TCQ_F_INGRESS))
-			root_lock = qdisc_root_sleeping_lock(sch);
-		else
-			root_lock = qdisc_lock(sch);
-
 		err = gen_new_estimator(&sch->bstats, &sch->rate_est,
-					root_lock, tca[TCA_RATE]);
+					qdisc_root_lock(sch),
+					tca[TCA_RATE]);
 		if (err) {
 			/*
 			 * Any broken qdiscs that would require
@@ -851,8 +806,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 			goto err_out3;
 		}
 	}
-
-	qdisc_list_add(sch);
+	if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS))
+		list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list);
 
 	return sch;
 }
@@ -891,8 +846,7 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
 
 	if (tca[TCA_RATE])
 		gen_replace_estimator(&sch->bstats, &sch->rate_est,
-				      qdisc_root_sleeping_lock(sch),
-				      tca[TCA_RATE]);
+				      qdisc_root_lock(sch), tca[TCA_RATE]);
 	return 0;
 }
 
diff --git a/trunk/net/sched/sch_cbq.c b/trunk/net/sched/sch_cbq.c
index 9b720adedead..47ef492c4ff4 100644
--- a/trunk/net/sched/sch_cbq.c
+++ b/trunk/net/sched/sch_cbq.c
@@ -521,10 +521,6 @@ static void cbq_ovl_delay(struct cbq_class *cl)
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
 	psched_tdiff_t delay = cl->undertime - q->now;
 
-	if (test_bit(__QDISC_STATE_DEACTIVATED,
-		     &qdisc_root_sleeping(cl->qdisc)->state))
-		return;
-
 	if (!cl->delayed) {
 		psched_time_t sched = q->now;
 		ktime_t expires;
@@ -1839,7 +1835,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      qdisc_root_sleeping_lock(sch),
+					      qdisc_root_lock(sch),
 					      tca[TCA_RATE]);
 		return 0;
 	}
@@ -1930,7 +1926,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 	if (tca[TCA_RATE])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  qdisc_root_sleeping_lock(sch), tca[TCA_RATE]);
+				  qdisc_root_lock(sch), tca[TCA_RATE]);
 
 	*arg = (unsigned long)cl;
 	return 0;
diff --git a/trunk/net/sched/sch_generic.c b/trunk/net/sched/sch_generic.c
index 9634091ee2f0..c3ed4d44fc14 100644
--- a/trunk/net/sched/sch_generic.c
+++ b/trunk/net/sched/sch_generic.c
@@ -526,9 +526,10 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	    !atomic_dec_and_test(&qdisc->refcnt))
 		return;
 
-#ifdef CONFIG_NET_SCHED
-	qdisc_list_del(qdisc);
+	if (qdisc->parent)
+		list_del(&qdisc->list);
 
+#ifdef CONFIG_NET_SCHED
 	qdisc_put_stab(qdisc->stab);
 #endif
 	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
@@ -634,7 +635,7 @@ static void dev_deactivate_queue(struct net_device *dev,
 		if (!(qdisc->flags & TCQ_F_BUILTIN))
 			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
 
-		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+		dev_queue->qdisc = qdisc_default;
 		qdisc_reset(qdisc);
 
 		spin_unlock_bh(qdisc_lock(qdisc));
@@ -709,7 +710,7 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 	struct Qdisc *qdisc_default = _qdisc_default;
 
 	if (qdisc) {
-		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+		dev_queue->qdisc = qdisc_default;
 		dev_queue->qdisc_sleeping = qdisc_default;
 
 		qdisc_destroy(qdisc);
diff --git a/trunk/net/sched/sch_hfsc.c b/trunk/net/sched/sch_hfsc.c
index c1e77da8cd09..c2b8d9cce3d2 100644
--- a/trunk/net/sched/sch_hfsc.c
+++ b/trunk/net/sched/sch_hfsc.c
@@ -1045,7 +1045,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      qdisc_root_sleeping_lock(sch),
+					      qdisc_root_lock(sch),
 					      tca[TCA_RATE]);
 		return 0;
 	}
@@ -1104,7 +1104,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	if (tca[TCA_RATE])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  qdisc_root_sleeping_lock(sch), tca[TCA_RATE]);
+				  qdisc_root_lock(sch), tca[TCA_RATE]);
 
 	*arg = (unsigned long)cl;
 	return 0;
 }
diff --git a/trunk/net/sched/sch_htb.c b/trunk/net/sched/sch_htb.c
index 97d4761cc31e..0df0df202ed0 100644
--- a/trunk/net/sched/sch_htb.c
+++ b/trunk/net/sched/sch_htb.c
@@ -1372,7 +1372,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 			goto failure;
 
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  qdisc_root_sleeping_lock(sch),
+				  qdisc_root_lock(sch),
 				  tca[TCA_RATE] ? : &est.nla);
 		cl->refcnt = 1;
 		cl->children = 0;
@@ -1427,7 +1427,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	} else {
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      qdisc_root_sleeping_lock(sch),
+					      qdisc_root_lock(sch),
 					      tca[TCA_RATE]);
 		sch_tree_lock(sch);
 	}
diff --git a/trunk/net/sctp/auth.c b/trunk/net/sctp/auth.c
index 1fcb4cf2f4c9..675a5c3e68a6 100644
--- a/trunk/net/sctp/auth.c
+++ b/trunk/net/sctp/auth.c
@@ -80,10 +80,6 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
 {
 	struct sctp_auth_bytes *key;
 
-	/* Verify that we are not going to overflow INT_MAX */
-	if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
-		return NULL;
-
 	/* Allocate the shared key */
 	key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp);
 	if (!key)
diff --git a/trunk/net/sctp/endpointola.c b/trunk/net/sctp/endpointola.c
index 4c8d9f45ce09..e39a0cdef184 100644
--- a/trunk/net/sctp/endpointola.c
+++ b/trunk/net/sctp/endpointola.c
@@ -103,7 +103,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 
 		/* Initialize the CHUNKS parameter */
 		auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS;
-		auth_chunks->param_hdr.length = htons(sizeof(sctp_paramhdr_t));
 
 		/* If the Add-IP functionality is enabled, we must
 		 * authenticate, ASCONF and ASCONF-ACK chunks
@@ -111,7 +110,8 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 		if (sctp_addip_enable) {
 			auth_chunks->chunks[0] = SCTP_CID_ASCONF;
 			auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
-			auth_chunks->param_hdr.length += htons(2);
+			auth_chunks->param_hdr.length =
+					htons(sizeof(sctp_paramhdr_t) + 2);
 		}
 	}
 
diff --git a/trunk/net/sctp/socket.c b/trunk/net/sctp/socket.c
index afa952e726d7..dbb79adf8f3c 100644
--- a/trunk/net/sctp/socket.c
+++ b/trunk/net/sctp/socket.c
@@ -3055,9 +3055,6 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
 {
 	struct sctp_authchunk val;
 
-	if (!sctp_auth_enable)
-		return -EACCES;
-
 	if (optlen != sizeof(struct sctp_authchunk))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -3088,9 +3085,6 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
 	struct sctp_hmacalgo *hmacs;
 	int err;
 
-	if (!sctp_auth_enable)
-		return -EACCES;
-
 	if (optlen < sizeof(struct sctp_hmacalgo))
 		return -EINVAL;
 
@@ -3129,9 +3123,6 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 	struct sctp_association *asoc;
 	int ret;
 
-	if (!sctp_auth_enable)
-		return -EACCES;
-
 	if (optlen <= sizeof(struct sctp_authkey))
 		return -EINVAL;
 
@@ -3144,11 +3135,6 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 		goto out;
 	}
 
-	if (authkey->sca_keylength > optlen) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
 	if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
 		ret = -EINVAL;
@@ -3174,9 +3160,6 @@ static int sctp_setsockopt_active_key(struct sock *sk,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
-	if (!sctp_auth_enable)
-		return -EACCES;
-
 	if (optlen != sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -3202,9 +3185,6 @@ static int sctp_setsockopt_del_key(struct sock *sk,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
-	if (!sctp_auth_enable)
-		return -EACCES;
-
 	if (optlen != sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -5217,29 +5197,19 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
 				    char __user *optval, int __user *optlen)
 {
-	struct sctp_hmacalgo  __user *p = (void __user *)optval;
 	struct sctp_hmac_algo_param *hmacs;
-	__u16 data_len = 0;
-	u32 num_idents;
-
-	if (!sctp_auth_enable)
-		return -EACCES;
+	__u16 param_len;
 
 	hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
-	data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
+	param_len = ntohs(hmacs->param_hdr.length);
 
-	if (len < sizeof(struct sctp_hmacalgo) + data_len)
+	if (len < param_len)
 		return -EINVAL;
-
-	len = sizeof(struct sctp_hmacalgo) + data_len;
-	num_idents = data_len / sizeof(u16);
-
 	if (put_user(len, optlen))
 		return -EFAULT;
-	if (put_user(num_idents, &p->shmac_num_idents))
-		return -EFAULT;
-	if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
+	if (copy_to_user(optval, hmacs->hmac_ids, len))
 		return -EFAULT;
+
 	return 0;
 }
 
@@ -5249,9 +5219,6 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
-	if (!sctp_auth_enable)
-		return -EACCES;
-
 	if (len < sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
@@ -5266,12 +5233,6 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 	else
 		val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;
 
-	len = sizeof(struct sctp_authkeyid);
-	if (put_user(len, optlen))
-		return -EFAULT;
-	if (copy_to_user(optval, &val, len))
-		return -EFAULT;
-
 	return 0;
 }
 
@@ -5282,16 +5243,13 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
 	struct sctp_authchunks val;
 	struct sctp_association *asoc;
 	struct sctp_chunks_param *ch;
-	u32    num_chunks = 0;
+	u32    num_chunks;
 	char __user *to;
 
-	if (!sctp_auth_enable)
-		return -EACCES;
-
-	if (len < sizeof(struct sctp_authchunks))
+	if (len <= sizeof(struct sctp_authchunks))
 		return -EINVAL;
 
-	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
+	if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
 		return -EFAULT;
 
 	to = p->gauth_chunks;
@@ -5300,21 +5258,20 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
 		return -EINVAL;
 
 	ch = asoc->peer.peer_chunks;
-	if (!ch)
-		goto num;
 
 	/* See if the user provided enough room for all the data */
 	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
 	if (len < num_chunks)
 		return -EINVAL;
 
-	if (copy_to_user(to, ch->chunks, num_chunks))
+	len = num_chunks;
+	if (put_user(len, optlen))
 		return -EFAULT;
-num:
-	len = sizeof(struct sctp_authchunks) + num_chunks;
-	if (put_user(len, optlen))
-		return -EFAULT;
+
 	if (put_user(num_chunks, &p->gauth_number_of_chunks))
 		return -EFAULT;
+	if (copy_to_user(to, ch->chunks, len))
+		return -EFAULT;
+
 	return 0;
 }
 
@@ -5325,16 +5282,13 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
 	struct sctp_authchunks val;
 	struct sctp_association *asoc;
 	struct sctp_chunks_param *ch;
-	u32    num_chunks = 0;
+	u32    num_chunks;
 	char __user *to;
 
-	if (!sctp_auth_enable)
-		return -EACCES;
-
-	if (len < sizeof(struct sctp_authchunks))
+	if (len <= sizeof(struct sctp_authchunks))
 		return -EINVAL;
 
-	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
+	if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
 		return -EFAULT;
 
 	to = p->gauth_chunks;
@@ -5347,21 +5301,17 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
 	else
 		ch = sctp_sk(sk)->ep->auth_chunk_list;
 
-	if (!ch)
-		goto num;
-
 	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
-	if (len < sizeof(struct sctp_authchunks) + num_chunks)
+	if (len < num_chunks)
 		return -EINVAL;
 
-	if (copy_to_user(to, ch->chunks, num_chunks))
-		return -EFAULT;
-num:
-	len = sizeof(struct sctp_authchunks) + num_chunks;
+	len = num_chunks;
 	if (put_user(len, optlen))
 		return -EFAULT;
 	if (put_user(num_chunks, &p->gauth_number_of_chunks))
 		return -EFAULT;
+	if (copy_to_user(to, ch->chunks, len))
+		return -EFAULT;
 
 	return 0;
 }