diff --git a/[refs] b/[refs]
index f1deeab019d2..4e04e98e39b6 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c4560318cf8fed91e3146a3c1247e11542d91d91
+refs/heads/master: 373a5e0247933695d06c11061baab9673b4e717c
diff --git a/trunk/drivers/net/bnx2x.h b/trunk/drivers/net/bnx2x.h
index b468f904c7f8..a14dba1afcc5 100644
--- a/trunk/drivers/net/bnx2x.h
+++ b/trunk/drivers/net/bnx2x.h
@@ -271,7 +271,7 @@ struct bnx2x_fastpath {
 					(fp->tx_pkt_prod != fp->tx_pkt_cons))
 
 #define BNX2X_HAS_RX_WORK(fp) \
-			(fp->rx_comp_cons != le16_to_cpu(*fp->rx_cons_sb))
+			(fp->rx_comp_cons != rx_cons_sb)
 
 #define BNX2X_HAS_WORK(fp)	(BNX2X_HAS_RX_WORK(fp) || BNX2X_HAS_TX_WORK(fp))
 
diff --git a/trunk/drivers/net/bnx2x_main.c b/trunk/drivers/net/bnx2x_main.c
index 971576b43687..82deea0a63f5 100644
--- a/trunk/drivers/net/bnx2x_main.c
+++ b/trunk/drivers/net/bnx2x_main.c
@@ -59,8 +59,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION	"1.45.17"
-#define DRV_MODULE_RELDATE	"2008/08/13"
+#define DRV_MODULE_VERSION	"1.45.20"
+#define DRV_MODULE_RELDATE	"2008/08/25"
 #define BNX2X_BC_VER		0x040200
 
 /* Time in jiffies before concluding the transmitter is hung */
@@ -1717,8 +1717,8 @@ static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
 		return -EEXIST;
 	}
 
-	/* Try for 1 second every 5ms */
-	for (cnt = 0; cnt < 200; cnt++) {
+	/* Try for 5 seconds every 5ms */
+	for (cnt = 0; cnt < 1000; cnt++) {
 		/* Try to acquire the lock */
 		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
 		lock_status = REG_RD(bp, hw_lock_control_reg);
@@ -2550,6 +2550,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
 		BNX2X_ERR("SPIO5 hw attention\n");
 
 		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 			/* Fan failure attention */
@@ -4605,6 +4606,17 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
 {
 	int i;
 
+	if (bp->flags & TPA_ENABLE_FLAG) {
+		struct tstorm_eth_tpa_exist tpa = {0};
+
+		tpa.tpa_exist = 1;
+
+		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
+		       ((u32 *)&tpa)[0]);
+		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
+		       ((u32 *)&tpa)[1]);
+	}
+
 	/* Zero this manually as its initialization is
 	   currently missing in the initTool */
 	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -5337,6 +5349,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
 	}
 
 	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* Fan failure is indicated by SPIO 5 */
 		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
@@ -5363,17 +5376,6 @@ static int bnx2x_init_common(struct bnx2x *bp)
 
 	enable_blocks_attention(bp);
 
-	if (bp->flags & TPA_ENABLE_FLAG) {
-		struct tstorm_eth_tpa_exist tmp = {0};
-
-		tmp.tpa_exist = 1;
-
-		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
-		       ((u32 *)&tmp)[0]);
-		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
-		       ((u32 *)&tmp)[1]);
-	}
-
 	if (!BP_NOMCP(bp)) {
 		bnx2x_acquire_phy_lock(bp);
 		bnx2x_common_init_phy(bp, bp->common.shmem_base);
@@ -5531,6 +5533,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
 	/* Port DMAE comes here */
 
 	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
+	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
 	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
 		/* add SPIO 5 to group 0 */
 		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
@@ -6055,6 +6058,44 @@ static int bnx2x_req_irq(struct bnx2x *bp)
 	return rc;
 }
 
+static void bnx2x_napi_enable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_napi_disable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_netif_start(struct bnx2x *bp)
+{
+	if (atomic_dec_and_test(&bp->intr_sem)) {
+		if (netif_running(bp->dev)) {
+			if (bp->state == BNX2X_STATE_OPEN)
+				netif_wake_queue(bp->dev);
+			bnx2x_napi_enable(bp);
+			bnx2x_int_enable(bp);
+		}
+	}
+}
+
+static void bnx2x_netif_stop(struct bnx2x *bp)
+{
+	bnx2x_int_disable_sync(bp);
+	if (netif_running(bp->dev)) {
+		bnx2x_napi_disable(bp);
+		netif_tx_disable(bp->dev);
+		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
+	}
+}
+
 /*
  * Init service functions
  */
@@ -6338,7 +6379,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	rc = bnx2x_init_hw(bp, load_code);
 	if (rc) {
 		BNX2X_ERR("HW init failed, aborting\n");
-		goto load_error;
+		goto load_int_disable;
 	}
 
 	/* Setup NIC internals and enable interrupts */
@@ -6350,7 +6391,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		if (!load_code) {
 			BNX2X_ERR("MCP response failure, aborting\n");
 			rc = -EBUSY;
-			goto load_int_disable;
+			goto load_rings_free;
 		}
 	}
 
@@ -6360,8 +6401,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	/* Enable Rx interrupt handling before sending the ramrod
 	   as it's completed on Rx FP queue */
-	for_each_queue(bp, i)
-		napi_enable(&bnx2x_fp(bp, i, napi));
+	bnx2x_napi_enable(bp);
 
 	/* Enable interrupt handling */
 	atomic_set(&bp->intr_sem, 0);
@@ -6369,7 +6409,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	rc = bnx2x_setup_leading(bp);
 	if (rc) {
 		BNX2X_ERR("Setup leading failed!\n");
-		goto load_stop_netif;
+		goto load_netif_stop;
 	}
 
 	if (CHIP_IS_E1H(bp))
@@ -6382,7 +6422,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 		for_each_nondefault_queue(bp, i) {
 			rc = bnx2x_setup_multi(bp, i);
 			if (rc)
-				goto load_stop_netif;
+				goto load_netif_stop;
 		}
 
 		if (CHIP_IS_E1(bp))
@@ -6427,20 +6467,17 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
 	return 0;
 
-load_stop_netif:
+load_netif_stop:
+	bnx2x_napi_disable(bp);
+load_rings_free:
+	/* Free SKBs, SGEs, TPA pool and driver internals */
+	bnx2x_free_skbs(bp);
 	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
-
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 load_int_disable:
 	bnx2x_int_disable_sync(bp);
-
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
-
-	/* Free SKBs, SGEs, TPA pool and driver internals */
-	bnx2x_free_skbs(bp);
-	for_each_queue(bp, i)
-		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 load_error:
 	bnx2x_free_mem(bp);
 
@@ -6455,7 +6492,7 @@ static int bnx2x_stop_multi(struct bnx2x *bp, int index)
 
 	/* halt the connection */
 	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
-	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
+	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
 
 	/* Wait for completion */
 	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
@@ -6613,11 +6650,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 	bnx2x_set_storm_rx_mode(bp);
 
-	if (netif_running(bp->dev)) {
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-	}
-
+	bnx2x_netif_stop(bp);
+	if (!netif_running(bp->dev))
+		bnx2x_napi_disable(bp);
 	del_timer_sync(&bp->timer);
 	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
 		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
@@ -6631,9 +6666,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 
 			smp_rmb();
 			while (BNX2X_HAS_TX_WORK(fp)) {
-				if (!netif_running(bp->dev))
-					bnx2x_tx_int(fp, 1000);
-
+				bnx2x_tx_int(fp, 1000);
 				if (!cnt) {
 					BNX2X_ERR("timeout waiting for queue[%d]\n",
 						  i);
@@ -6649,46 +6682,12 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 			smp_rmb();
 		}
 	}
-
 	/* Give HW time to discard old tx messages */
 	msleep(1);
 
-	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
-	/* Disable interrupts after Tx and Rx are disabled on stack level */
-	bnx2x_int_disable_sync(bp);
-
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 
-	if (unload_mode == UNLOAD_NORMAL)
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
-	else if (bp->flags & NO_WOL_FLAG) {
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
-		if (CHIP_IS_E1H(bp))
-			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
-
-	} else if (bp->wol) {
-		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-		u8 *mac_addr = bp->dev->dev_addr;
-		u32 val;
-		/* The mac address is written to entries 1-4 to
-		   preserve entry 0 which is used by the PMF */
-		u8 entry = (BP_E1HVN(bp) + 1)*8;
-
-		val = (mac_addr[0] << 8) | mac_addr[1];
-		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
-
-		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
-		      (mac_addr[4] << 8) | mac_addr[5];
-		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
-
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
-
-	} else
-		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
 	if (CHIP_IS_E1(bp)) {
 		struct mac_configuration_cmd *config =
 						bnx2x_sp(bp, mcast_config);
@@ -6711,14 +6710,41 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
 			   U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
 
 	} else { /* E1H */
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
 		bnx2x_set_mac_addr_e1h(bp, 0);
 
 		for (i = 0; i < MC_HASH_SIZE; i++)
 			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
 	}
 
-	if (CHIP_IS_E1H(bp))
-		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+	if (unload_mode == UNLOAD_NORMAL)
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+	else if (bp->flags & NO_WOL_FLAG) {
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+		if (CHIP_IS_E1H(bp))
+			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
+
+	} else if (bp->wol) {
+		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+		u8 *mac_addr = bp->dev->dev_addr;
+		u32 val;
+		/* The mac address is written to entries 1-4 to
+		   preserve entry 0 which is used by the PMF */
+		u8 entry = (BP_E1HVN(bp) + 1)*8;
+
+		val = (mac_addr[0] << 8) | mac_addr[1];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
+
+		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+		      (mac_addr[4] << 8) | mac_addr[5];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+
+	} else
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 
 	/* Close multi and leading connections
 	   Completions for ramrods are collected in a synchronous way */
@@ -6821,6 +6847,10 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 		 */
 		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
+		if (val == 0x7)
+			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+
 		if (val == 0x7) {
 			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
 			/* save our func */
@@ -6898,7 +6928,6 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
 				       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
 					DRV_MSG_SEQ_NUMBER_MASK);
 		}
-		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
 	}
 }
 
@@ -8617,34 +8646,6 @@ static int bnx2x_test_memory(struct bnx2x *bp)
 	return rc;
 }
 
-static void bnx2x_netif_start(struct bnx2x *bp)
-{
-	int i;
-
-	if (atomic_dec_and_test(&bp->intr_sem)) {
-		if (netif_running(bp->dev)) {
-			bnx2x_int_enable(bp);
-			for_each_queue(bp, i)
-				napi_enable(&bnx2x_fp(bp, i, napi));
-			if (bp->state == BNX2X_STATE_OPEN)
-				netif_wake_queue(bp->dev);
-		}
-	}
-}
-
-static void bnx2x_netif_stop(struct bnx2x *bp)
-{
-	int i;
-
-	if (netif_running(bp->dev)) {
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-		for_each_queue(bp, i)
-			napi_disable(&bnx2x_fp(bp, i, napi));
-	}
-	bnx2x_int_disable_sync(bp);
-}
-
 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
 {
 	int cnt = 1000;
@@ -9250,6 +9251,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 						 napi);
 	struct bnx2x *bp = fp->bp;
 	int work_done = 0;
+	u16 rx_cons_sb;
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
@@ -9265,10 +9267,16 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 	if (BNX2X_HAS_TX_WORK(fp))
 		bnx2x_tx_int(fp, budget);
 
+	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+		rx_cons_sb++;
 	if (BNX2X_HAS_RX_WORK(fp))
 		work_done = bnx2x_rx_int(fp, budget);
 
 	rmb(); /* BNX2X_HAS_WORK() reads the status block */
+	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+		rx_cons_sb++;
 
 	/* must not complete if we consumed full budget */
 	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
@@ -9484,8 +9492,7 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	fp_index = (smp_processor_id() % bp->num_queues);
 	fp = &bp->fp[fp_index];
 
-	if (unlikely(bnx2x_tx_avail(bp->fp) <
-					(skb_shinfo(skb)->nr_frags + 3))) {
+	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
 		bp->eth_stats.driver_xoff++,
 		netif_stop_queue(dev);
 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
@@ -9548,7 +9555,6 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		tx_bd->vlan = cpu_to_le16(pkt_prod);
 
 	if (xmit_type) {
-
 		/* turn on parsing and get a BD */
 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 		pbd = (void *)&fp->tx_desc_ring[bd_prod];
diff --git a/trunk/include/net/pkt_sched.h b/trunk/include/net/pkt_sched.h
index 853fe83d9f37..b786a5b09253 100644
--- a/trunk/include/net/pkt_sched.h
+++ b/trunk/include/net/pkt_sched.h
@@ -78,6 +78,7 @@ extern struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
 
 extern int register_qdisc(struct Qdisc_ops *qops);
 extern int unregister_qdisc(struct Qdisc_ops *qops);
+extern void qdisc_list_del(struct Qdisc *q);
 extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
 extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
 extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
diff --git a/trunk/include/net/sch_generic.h b/trunk/include/net/sch_generic.h
index 84d25f2e6188..b1d2cfea89c5 100644
--- a/trunk/include/net/sch_generic.h
+++ b/trunk/include/net/sch_generic.h
@@ -193,6 +193,11 @@ static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
 	return qdisc->dev_queue->qdisc;
 }
 
+static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
+{
+	return qdisc->dev_queue->qdisc_sleeping;
+}
+
 /* The qdisc root lock is a mechanism by which to top level
  * of a qdisc tree can be locked from any qdisc node in the
  * forest.  This allows changing the configuration of some
diff --git a/trunk/net/ipv4/icmp.c b/trunk/net/ipv4/icmp.c
index 860558633b2c..55c355e63234 100644
--- a/trunk/net/ipv4/icmp.c
+++ b/trunk/net/ipv4/icmp.c
@@ -204,18 +204,22 @@ static struct sock *icmp_sk(struct net *net)
 	return net->ipv4.icmp_sk[smp_processor_id()];
 }
 
-static inline int icmp_xmit_lock(struct sock *sk)
+static inline struct sock *icmp_xmit_lock(struct net *net)
 {
+	struct sock *sk;
+
 	local_bh_disable();
 
+	sk = icmp_sk(net);
+
 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 		/* This can happen if the output path signals a
 		 * dst_link_failure() for an outgoing ICMP packet.
 		 */
 		local_bh_enable();
-		return 1;
+		return NULL;
 	}
-	return 0;
+	return sk;
 }
 
 static inline void icmp_xmit_unlock(struct sock *sk)
@@ -354,15 +358,17 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	struct ipcm_cookie ipc;
 	struct rtable *rt = skb->rtable;
 	struct net *net = dev_net(rt->u.dst.dev);
-	struct sock *sk = icmp_sk(net);
-	struct inet_sock *inet = inet_sk(sk);
+	struct sock *sk;
+	struct inet_sock *inet;
 	__be32 daddr;
 
 	if (ip_options_echo(&icmp_param->replyopts, skb))
 		return;
 
-	if (icmp_xmit_lock(sk))
+	sk = icmp_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	inet = inet_sk(sk);
 
 	icmp_param->data.icmph.checksum = 0;
 
@@ -419,7 +425,6 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	if (!rt)
 		goto out;
 	net = dev_net(rt->u.dst.dev);
-	sk = icmp_sk(net);
 
 	/*
 	 *	Find the original header. It is expected to be valid, of course.
@@ -483,7 +488,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 		}
 	}
 
-	if (icmp_xmit_lock(sk))
+	sk = icmp_xmit_lock(net);
+	if (sk == NULL)
 		return;
 
 	/*
diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c
index cca921ea8550..e91bafeb32f4 100644
--- a/trunk/net/ipv4/route.c
+++ b/trunk/net/ipv4/route.c
@@ -3116,14 +3116,23 @@ static ctl_table ipv4_route_table[] = {
 	{ .ctl_name = 0 }
 };
 
-static __net_initdata struct ctl_path ipv4_route_path[] = {
+static struct ctl_table empty[1];
+
+static struct ctl_table ipv4_skeleton[] =
+{
+	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE,
+	  .child = ipv4_route_table},
+	{ .procname = "neigh", .ctl_name = NET_IPV4_NEIGH,
+	  .child = empty},
+	{ }
+};
+
+static __net_initdata struct ctl_path ipv4_path[] = {
 	{ .procname = "net", .ctl_name = CTL_NET, },
 	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
-	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
 	{ },
 };
-
 static struct ctl_table ipv4_route_flush_table[] = {
 	{
 		.ctl_name	= NET_IPV4_ROUTE_FLUSH,
@@ -3136,6 +3145,13 @@ static struct ctl_table ipv4_route_flush_table[] = {
 	{ .ctl_name = 0 },
 };
 
+static __net_initdata struct ctl_path ipv4_route_path[] = {
+	{ .procname = "net", .ctl_name = CTL_NET, },
+	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
+	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
+	{ },
+};
+
 static __net_init int sysctl_route_net_init(struct net *net)
 {
 	struct ctl_table *tbl;
@@ -3287,7 +3303,7 @@ int __init ip_rt_init(void)
  */
 void __init ip_static_sysctl_init(void)
 {
-	register_sysctl_paths(ipv4_route_path, ipv4_route_table);
+	register_sysctl_paths(ipv4_path, ipv4_skeleton);
 }
 #endif
 
diff --git a/trunk/net/ipv6/addrconf.c b/trunk/net/ipv6/addrconf.c
index e2d3b7580b76..7b6a584b62dd 100644
--- a/trunk/net/ipv6/addrconf.c
+++ b/trunk/net/ipv6/addrconf.c
@@ -1688,6 +1688,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
 		.fc_dst_len = plen,
 		.fc_flags = RTF_UP | flags,
 		.fc_nlinfo.nl_net = dev_net(dev),
+		.fc_protocol = RTPROT_KERNEL,
 	};
 
 	ipv6_addr_copy(&cfg.fc_dst, pfx);
diff --git a/trunk/net/ipv6/icmp.c b/trunk/net/ipv6/icmp.c
index abedf95fdf2d..b3157a0cc15d 100644
--- a/trunk/net/ipv6/icmp.c
+++ b/trunk/net/ipv6/icmp.c
@@ -91,19 +91,22 @@ static struct inet6_protocol icmpv6_protocol = {
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
-static __inline__ int icmpv6_xmit_lock(struct sock *sk)
+static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
 {
+	struct sock *sk;
+
 	local_bh_disable();
 
+	sk = icmpv6_sk(net);
 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 		/* This can happen if the output path (f.e. SIT or
 		 * ip6ip6 tunnel) signals dst_link_failure() for an
 		 * outgoing ICMP6 packet.
 		 */
 		local_bh_enable();
-		return 1;
+		return NULL;
 	}
-	return 0;
+	return sk;
 }
 
 static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
@@ -392,11 +395,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
 	fl.fl_icmp_code = code;
 	security_skb_classify_flow(skb, &fl);
 
-	sk = icmpv6_sk(net);
-	np = inet6_sk(sk);
-
-	if (icmpv6_xmit_lock(sk))
+	sk = icmpv6_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	np = inet6_sk(sk);
 
 	if (!icmpv6_xrlim_allow(sk, type, &fl))
 		goto out;
@@ -539,11 +541,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
 	security_skb_classify_flow(skb, &fl);
 
-	sk = icmpv6_sk(net);
-	np = inet6_sk(sk);
-
-	if (icmpv6_xmit_lock(sk))
+	sk = icmpv6_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	np = inet6_sk(sk);
 
 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
 		fl.oif = np->mcast_oif;
diff --git a/trunk/net/ipv6/sysctl_net_ipv6.c b/trunk/net/ipv6/sysctl_net_ipv6.c
index e6dfaeac6be3..587f8f60c489 100644
--- a/trunk/net/ipv6/sysctl_net_ipv6.c
+++ b/trunk/net/ipv6/sysctl_net_ipv6.c
@@ -156,7 +156,7 @@ static struct ctl_table_header *ip6_base;
 int ipv6_static_sysctl_register(void)
 {
 	static struct ctl_table empty[1];
-	ip6_base = register_net_sysctl_rotable(net_ipv6_ctl_path, empty);
+	ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty);
 	if (ip6_base == NULL)
 		return -ENOMEM;
 	return 0;
diff --git a/trunk/net/sched/sch_api.c b/trunk/net/sched/sch_api.c
index ef0efeca6352..e7fb9e0d21b4 100644
--- a/trunk/net/sched/sch_api.c
+++ b/trunk/net/sched/sch_api.c
@@ -199,19 +199,53 @@ struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 	return NULL;
 }
 
+/*
+ * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen()
+ * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue()
+ */
+static DEFINE_SPINLOCK(qdisc_list_lock);
+
+static void qdisc_list_add(struct Qdisc *q)
+{
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+		spin_lock_bh(&qdisc_list_lock);
+		list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
+		spin_unlock_bh(&qdisc_list_lock);
+	}
+}
+
+void qdisc_list_del(struct Qdisc *q)
+{
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+		spin_lock_bh(&qdisc_list_lock);
+		list_del(&q->list);
+		spin_unlock_bh(&qdisc_list_lock);
+	}
+}
+EXPORT_SYMBOL(qdisc_list_del);
+
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
 	unsigned int i;
+	struct Qdisc *q;
+
+	spin_lock_bh(&qdisc_list_lock);
 
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
+		struct Qdisc *txq_root = txq->qdisc_sleeping;
 
 		q = qdisc_match_from_root(txq_root, handle);
 		if (q)
-			return q;
+			goto unlock;
 	}
-	return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+	q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+unlock:
+	spin_unlock_bh(&qdisc_list_lock);
+
+	return q;
 }
 
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
@@ -444,6 +478,10 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
 {
 	ktime_t time;
 
+	if (test_bit(__QDISC_STATE_DEACTIVATED,
+		     &qdisc_root_sleeping(wd->qdisc)->state))
+		return;
+
 	wd->qdisc->flags |= TCQ_F_THROTTLED;
 	time = ktime_set(0, 0);
 	time = ktime_add_ns(time, PSCHED_US2NS(expires));
@@ -806,8 +844,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 			goto err_out3;
 		}
 	}
-	if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS))
-		list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list);
+
+	qdisc_list_add(sch);
 
 	return sch;
 }
diff --git a/trunk/net/sched/sch_cbq.c b/trunk/net/sched/sch_cbq.c
index 47ef492c4ff4..8fa90d68ec6d 100644
--- a/trunk/net/sched/sch_cbq.c
+++ b/trunk/net/sched/sch_cbq.c
@@ -521,6 +521,10 @@ static void cbq_ovl_delay(struct cbq_class *cl)
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
 	psched_tdiff_t delay = cl->undertime - q->now;
 
+	if (test_bit(__QDISC_STATE_DEACTIVATED,
+		     &qdisc_root_sleeping(cl->qdisc)->state))
+		return;
+
 	if (!cl->delayed) {
 		psched_time_t sched = q->now;
 		ktime_t expires;
diff --git a/trunk/net/sched/sch_generic.c b/trunk/net/sched/sch_generic.c
index c3ed4d44fc14..5f0ade7806a7 100644
--- a/trunk/net/sched/sch_generic.c
+++ b/trunk/net/sched/sch_generic.c
@@ -526,10 +526,9 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	    !atomic_dec_and_test(&qdisc->refcnt))
 		return;
 
-	if (qdisc->parent)
-		list_del(&qdisc->list);
-
 #ifdef CONFIG_NET_SCHED
+	qdisc_list_del(qdisc);
+
 	qdisc_put_stab(qdisc->stab);
 #endif
 	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
diff --git a/trunk/net/sctp/auth.c b/trunk/net/sctp/auth.c
index 675a5c3e68a6..1fcb4cf2f4c9 100644
--- a/trunk/net/sctp/auth.c
+++ b/trunk/net/sctp/auth.c
@@ -80,6 +80,10 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
 {
 	struct sctp_auth_bytes *key;
 
+	/* Verify that we are not going to overflow INT_MAX */
+	if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
+		return NULL;
+
 	/* Allocate the shared key */
 	key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp);
 	if (!key)
diff --git a/trunk/net/sctp/endpointola.c b/trunk/net/sctp/endpointola.c
index e39a0cdef184..4c8d9f45ce09 100644
--- a/trunk/net/sctp/endpointola.c
+++ b/trunk/net/sctp/endpointola.c
@@ -103,6 +103,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 
 	/* Initialize the CHUNKS parameter */
 	auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS;
+	auth_chunks->param_hdr.length = htons(sizeof(sctp_paramhdr_t));
 
 	/* If the Add-IP functionality is enabled, we must
 	 * authenticate, ASCONF and ASCONF-ACK chunks
@@ -110,8 +111,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 	if (sctp_addip_enable) {
 		auth_chunks->chunks[0] = SCTP_CID_ASCONF;
 		auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
-		auth_chunks->param_hdr.length =
-				htons(sizeof(sctp_paramhdr_t) + 2);
+		auth_chunks->param_hdr.length += htons(2);
 	}
 
diff --git a/trunk/net/sctp/socket.c b/trunk/net/sctp/socket.c
index dbb79adf8f3c..afa952e726d7 100644
--- a/trunk/net/sctp/socket.c
+++ b/trunk/net/sctp/socket.c
@@ -3055,6 +3055,9 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
 {
 	struct sctp_authchunk val;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen != sizeof(struct sctp_authchunk))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -3085,6 +3088,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
 	struct sctp_hmacalgo *hmacs;
 	int err;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen < sizeof(struct sctp_hmacalgo))
 		return -EINVAL;
 
@@ -3123,6 +3129,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 	struct sctp_association *asoc;
 	int ret;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen <= sizeof(struct sctp_authkey))
 		return -EINVAL;
 
@@ -3135,6 +3144,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 		goto out;
 	}
 
+	if (authkey->sca_keylength > optlen) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
 	if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
 		ret = -EINVAL;
@@ -3160,6 +3174,9 @@ static int sctp_setsockopt_active_key(struct sock *sk,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen != sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -3185,6 +3202,9 @@ static int sctp_setsockopt_del_key(struct sock *sk,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen != sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -5197,19 +5217,29 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
 				      char __user *optval, int __user *optlen)
 {
+	struct sctp_hmacalgo __user *p = (void __user *)optval;
 	struct sctp_hmac_algo_param *hmacs;
-	__u16 param_len;
+	__u16 data_len = 0;
+	u32 num_idents;
+
+	if (!sctp_auth_enable)
+		return -EACCES;
 
 	hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
-	param_len = ntohs(hmacs->param_hdr.length);
+	data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
 
-	if (len < param_len)
+	if (len < sizeof(struct sctp_hmacalgo) + data_len)
 		return -EINVAL;
+
+	len = sizeof(struct sctp_hmacalgo) + data_len;
+	num_idents = data_len / sizeof(u16);
+
 	if (put_user(len, optlen))
 		return -EFAULT;
-	if (copy_to_user(optval, hmacs->hmac_ids, len))
+	if (put_user(num_idents, &p->shmac_num_idents))
+		return -EFAULT;
+	if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
 		return -EFAULT;
-
 	return 0;
 }
 
@@ -5219,6 +5249,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (len < sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
@@ -5233,6 +5266,12 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 	else
 		val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;
 
+	len = sizeof(struct sctp_authkeyid);
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &val, len))
+		return -EFAULT;
+
 	return 0;
 }
 
@@ -5243,13 +5282,16 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
 	struct sctp_authchunks val;
 	struct sctp_association *asoc;
 	struct sctp_chunks_param *ch;
-	u32    num_chunks;
+	u32    num_chunks = 0;
 	char __user *to;
 
-	if (len <= sizeof(struct sctp_authchunks))
+	if (!sctp_auth_enable)
+		return -EACCES;
+
+	if (len < sizeof(struct sctp_authchunks))
 		return -EINVAL;
 
-	if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
+	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
 		return -EFAULT;
 
 	to = p->gauth_chunks;
@@ -5258,20 +5300,21 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
 		return -EINVAL;
 
 	ch = asoc->peer.peer_chunks;
+	if (!ch)
+		goto num;
 
 	/* See if the user provided enough room for all the data */
 	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
 	if (len < num_chunks)
 		return -EINVAL;
 
-	len = num_chunks;
-	if (put_user(len, optlen))
+	if (copy_to_user(to, ch->chunks, num_chunks))
 		return -EFAULT;
+num:
+	len = sizeof(struct sctp_authchunks) + num_chunks;
+	if (put_user(len, optlen))
+		return -EFAULT;
 	if (put_user(num_chunks, &p->gauth_number_of_chunks))
 		return -EFAULT;
-	if (copy_to_user(to, ch->chunks, len))
-		return -EFAULT;
-
 	return 0;
 }
 
@@ -5282,13 +5325,16 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
 	struct sctp_authchunks val;
 	struct sctp_association *asoc;
 	struct sctp_chunks_param *ch;
-	u32    num_chunks;
+	u32    num_chunks = 0;
 	char __user *to;
 
-	if (len <= sizeof(struct sctp_authchunks))
+	if (!sctp_auth_enable)
+		return -EACCES;
+
+	if (len < sizeof(struct sctp_authchunks))
 		return -EINVAL;
 
-	if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
+	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
 		return -EFAULT;
 
 	to = p->gauth_chunks;
@@ -5301,17 +5347,21 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
 	else
 		ch = sctp_sk(sk)->ep->auth_chunk_list;
 
+	if (!ch)
+		goto num;
+
 	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
-	if (len < num_chunks)
+	if (len < sizeof(struct sctp_authchunks) + num_chunks)
 		return -EINVAL;
 
-	len = num_chunks;
+	if (copy_to_user(to, ch->chunks, num_chunks))
+		return -EFAULT;
+num:
+	len = sizeof(struct sctp_authchunks) + num_chunks;
 	if (put_user(len, optlen))
 		return -EFAULT;
 	if (put_user(num_chunks, &p->gauth_number_of_chunks))
 		return -EFAULT;
-	if (copy_to_user(to, ch->chunks, len))
-		return -EFAULT;
 
 	return 0;
 }