diff --git a/[refs] b/[refs]
index a8735e4ee8d8..cc328b5fd716 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 054581e6c1eb314d54d4747fba545e9802be29da
+refs/heads/master: a73f89a61f92b364f0b4a3be412b5b70553afc23
diff --git a/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 1f78b63d5efe..9cc15701101b 100644
--- a/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/trunk/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -261,6 +261,7 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
 	if ((phy_data & BMSR_LSTATUS) == 0) {
 		/* link down */
 		netif_carrier_off(netdev);
+		netif_stop_queue(netdev);
 		hw->hibernate = true;
 		if (atl1c_reset_mac(hw) != 0)
 			if (netif_msg_hw(adapter))
diff --git a/trunk/drivers/net/ethernet/broadcom/b44.c b/trunk/drivers/net/ethernet/broadcom/b44.c
index d09c6b583d17..46b8b7d81633 100644
--- a/trunk/drivers/net/ethernet/broadcom/b44.c
+++ b/trunk/drivers/net/ethernet/broadcom/b44.c
@@ -656,7 +656,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 			dma_unmap_single(bp->sdev->dma_dev, mapping,
					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
 		dev_kfree_skb_any(skb);
-		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
+		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
 		if (skb == NULL)
 			return -ENOMEM;
 		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
@@ -967,7 +967,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		dma_unmap_single(bp->sdev->dma_dev, mapping, len,
				 DMA_TO_DEVICE);
 
-		bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
+		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb)
 			goto err_out;
 
diff --git a/trunk/drivers/net/ethernet/broadcom/cnic.c b/trunk/drivers/net/ethernet/broadcom/cnic.c
index 3c95065e0def..c95e7b5e2b85 100644
--- a/trunk/drivers/net/ethernet/broadcom/cnic.c
+++ b/trunk/drivers/net/ethernet/broadcom/cnic.c
@@ -1053,13 +1053,12 @@ static int cnic_init_uio(struct cnic_dev *dev)
 
 	uinfo = &udev->cnic_uinfo;
 
-	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
+	uinfo->mem[0].addr = dev->netdev->base_addr;
 	uinfo->mem[0].internal_addr = dev->regview;
+	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
 	uinfo->mem[0].memtype = UIO_MEM_PHYS;
 
 	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
-		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
-						     TX_MAX_TSS_RINGS + 1);
 		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					PAGE_MASK;
 		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
@@ -1069,8 +1068,6 @@ static int cnic_init_uio(struct cnic_dev *dev)
 
 		uinfo->name = "bnx2_cnic";
 	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
-		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
-
 		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			PAGE_MASK;
 		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e242104ab471..18ca3bcadf0c 100644
--- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6647,11 +6647,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 		return -EINVAL;
 	}
 
-	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
-		e_err(drv, "Enable failed, SR-IOV enabled\n");
-		return -EINVAL;
-	}
-
 	/* Hardware supports up to 8 traffic classes */
 	if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
	    (hw->mac.type == ixgbe_mac_82598EB &&
diff --git a/trunk/drivers/net/phy/mdio-mux.c b/trunk/drivers/net/phy/mdio-mux.c
index 5c120189ec86..39ea0674dcde 100644
--- a/trunk/drivers/net/phy/mdio-mux.c
+++ b/trunk/drivers/net/phy/mdio-mux.c
@@ -46,13 +46,7 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
 	struct mdio_mux_parent_bus *pb = cb->parent;
 	int r;
 
-	/* In theory multiple mdio_mux could be stacked, thus creating
-	 * more than a single level of nesting. But in practice,
-	 * SINGLE_DEPTH_NESTING will cover the vast majority of use
-	 * cases. We use it, instead of trying to handle the general
-	 * case.
-	 */
-	mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
+	mutex_lock(&pb->mii_bus->mdio_lock);
 	r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
 	if (r)
 		goto out;
@@ -77,7 +71,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
 
 	int r;
 
-	mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
+	mutex_lock(&pb->mii_bus->mdio_lock);
 	r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
 	if (r)
 		goto out;
diff --git a/trunk/drivers/net/usb/qmi_wwan.c b/trunk/drivers/net/usb/qmi_wwan.c
index a051cedd64bd..b01960fcfbc9 100644
--- a/trunk/drivers/net/usb/qmi_wwan.c
+++ b/trunk/drivers/net/usb/qmi_wwan.c
@@ -346,15 +346,6 @@ static const struct driver_info qmi_wwan_force_int1 = {
 	.data		= BIT(1), /* interface whitelist bitmap */
 };
 
-static const struct driver_info qmi_wwan_force_int2 = {
-	.description	= "Qualcomm WWAN/QMI device",
-	.flags		= FLAG_WWAN,
-	.bind		= qmi_wwan_bind_shared,
-	.unbind		= qmi_wwan_unbind_shared,
-	.manage_power	= qmi_wwan_manage_power,
-	.data		= BIT(2), /* interface whitelist bitmap */
-};
-
 static const struct driver_info qmi_wwan_force_int3 = {
 	.description	= "Qualcomm WWAN/QMI device",
 	.flags		= FLAG_WWAN,
@@ -507,15 +498,6 @@ static const struct usb_device_id products[] = {
 		.bInterfaceProtocol = 0xff,
 		.driver_info        = (unsigned long)&qmi_wwan_force_int4,
 	},
-	{	/* ZTE MF60 */
-		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
-		.idVendor           = 0x19d2,
-		.idProduct          = 0x1402,
-		.bInterfaceClass    = 0xff,
-		.bInterfaceSubClass = 0xff,
-		.bInterfaceProtocol = 0xff,
-		.driver_info        = (unsigned long)&qmi_wwan_force_int2,
-	},
 	{	/* Sierra Wireless MC77xx in QMI mode */
 		.match_flags	    = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
 		.idVendor           = 0x1199,
diff --git a/trunk/drivers/net/wireless/b43legacy/dma.c b/trunk/drivers/net/wireless/b43legacy/dma.c
index c8baf020c20f..f1f8bd09bd87 100644
--- a/trunk/drivers/net/wireless/b43legacy/dma.c
+++ b/trunk/drivers/net/wireless/b43legacy/dma.c
@@ -1072,7 +1072,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 	/* create a bounce buffer in zone_dma on mapping failure. */
 	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
-		bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb) {
 			ring->current_slot = old_top_slot;
 			ring->used_slots = old_used_slots;
diff --git a/trunk/net/core/netprio_cgroup.c b/trunk/net/core/netprio_cgroup.c
index aa907ed466ea..5b8aa2fae48b 100644
--- a/trunk/net/core/netprio_cgroup.c
+++ b/trunk/net/core/netprio_cgroup.c
@@ -49,9 +49,8 @@ static int get_prioidx(u32 *prio)
 		return -ENOSPC;
 	}
 	set_bit(prioidx, prioidx_map);
-	if (atomic_read(&max_prioidx) < prioidx)
-		atomic_set(&max_prioidx, prioidx);
 	spin_unlock_irqrestore(&prioidx_map_lock, flags);
+	atomic_set(&max_prioidx, prioidx);
 	*prio = prioidx;
 	return 0;
 }
diff --git a/trunk/net/ieee802154/dgram.c b/trunk/net/ieee802154/dgram.c
index 16705611589a..6fbb2ad7bb6d 100644
--- a/trunk/net/ieee802154/dgram.c
+++ b/trunk/net/ieee802154/dgram.c
@@ -230,12 +230,6 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
 	mtu = dev->mtu;
 	pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
 
-	if (size > mtu) {
-		pr_debug("size = %Zu, mtu = %u\n", size, mtu);
-		err = -EINVAL;
-		goto out_dev;
-	}
-
 	hlen = LL_RESERVED_SPACE(dev);
 	tlen = dev->needed_tailroom;
 	skb = sock_alloc_send_skb(sk, hlen + tlen + size,
@@ -264,6 +258,12 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
 	if (err < 0)
 		goto out_skb;
 
+	if (size > mtu) {
+		pr_debug("size = %Zu, mtu = %u\n", size, mtu);
+		err = -EINVAL;
+		goto out_skb;
+	}
+
 	skb->dev = dev;
 	skb->sk = sk;
 	skb->protocol = htons(ETH_P_IEEE802154);
diff --git a/trunk/net/netfilter/xt_set.c b/trunk/net/netfilter/xt_set.c
index 035960ec5cb9..c6f7db720d84 100644
--- a/trunk/net/netfilter/xt_set.c
+++ b/trunk/net/netfilter/xt_set.c
@@ -16,6 +16,7 @@
 
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -310,7 +311,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
		    info->del_set.flags, 0, UINT_MAX);
 
 	/* Normalize to fit into jiffies */
-	if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
+	if (add_opt.timeout != IPSET_NO_TIMEOUT &&
+	    add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
 		add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
 	if (info->add_set.index != IPSET_INVALID_ID)
 		ip_set_add(info->add_set.index, skb, par, &add_opt);
diff --git a/trunk/net/sched/sch_netem.c b/trunk/net/sched/sch_netem.c
index c412ad0d0308..a2a95aabf9c2 100644
--- a/trunk/net/sched/sch_netem.c
+++ b/trunk/net/sched/sch_netem.c
@@ -331,22 +331,29 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
 	return PSCHED_NS2TICKS(ticks);
 }
 
-static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
 	struct sk_buff_head *list = &sch->q;
 	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
-	struct sk_buff *skb = skb_peek_tail(list);
+	struct sk_buff *skb;
 
-	/* Optimize for add at tail */
-	if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
-		return __skb_queue_tail(list, nskb);
+	if (likely(skb_queue_len(list) < sch->limit)) {
+		skb = skb_peek_tail(list);
+		/* Optimize for add at tail */
+		if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+			return qdisc_enqueue_tail(nskb, sch);
 
-	skb_queue_reverse_walk(list, skb) {
-		if (tnext >= netem_skb_cb(skb)->time_to_send)
-			break;
+		skb_queue_reverse_walk(list, skb) {
+			if (tnext >= netem_skb_cb(skb)->time_to_send)
+				break;
+		}
+
+		__skb_queue_after(list, skb, nskb);
+		sch->qstats.backlog += qdisc_pkt_len(nskb);
+		return NET_XMIT_SUCCESS;
 	}
 
-	__skb_queue_after(list, skb, nskb);
+	return qdisc_reshape_fail(nskb, sch);
 }
 
 /*
@@ -361,6 +368,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* We don't fill cb now as skb_unshare() may invalidate it */
 	struct netem_skb_cb *cb;
 	struct sk_buff *skb2;
+	int ret;
 	int count = 1;
 
 	/* Random duplication */
@@ -411,11 +419,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
 	}
 
-	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
-		return qdisc_reshape_fail(skb, sch);
-
-	sch->qstats.backlog += qdisc_pkt_len(skb);
-
 	cb = netem_skb_cb(skb);
 	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
@@ -447,7 +450,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 		cb->time_to_send = now + delay;
 		++q->counter;
-		tfifo_enqueue(skb, sch);
+		ret = tfifo_enqueue(skb, sch);
 	} else {
 		/*
 		 * Do re-ordering by putting one out of N packets at the front
@@ -457,7 +460,16 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		q->counter = 0;
 
 		__skb_queue_head(&sch->q, skb);
+		sch->qstats.backlog += qdisc_pkt_len(skb);
 		sch->qstats.requeues++;
+		ret = NET_XMIT_SUCCESS;
+	}
+
+	if (ret != NET_XMIT_SUCCESS) {
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			return ret;
+		}
 	}
 
 	return NET_XMIT_SUCCESS;