Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Handle multicast packets properly in fast-RX path of mac80211, from
    Johannes Berg.

 2) Because of a logic bug, the user can't actually force SW
    checksumming on r8152 devices, which makes diagnosing HW
    checksumming bugs really annoying (sketched after this list). Fix
    from Hayes Wang.

 3) VXLAN route lookup does not take the source and destination ports
    into account, which means IPsec policies cannot be matched properly
    (see the sketch after this list). Fix from Martynas Pumputis.

 4) Do proper RCU locking in netvsc callbacks (sketched after this
    list), from Stephen Hemminger.

 5) Fix SKB leaks in mlxsw driver, from Arkadi Sharshevsky.

 6) If lwtunnel_fill_encap() fails, we do not abort the netlink message
    construction properly in fib_dump_info() (see the sketch after this
    list), from David Ahern.

 7) Do not use the kernel stack for DMA buffers in the atusb driver
    (sketched after this list), from Stefan Schmidt.

 8) Openvswitch conntrack actions need to maintain a correct checksum
    (see the sketch after this list), fix from Lance Richardson.

 9) ax25_disconnect() checks whether ax25->sk is NULL, but not at every
    spot where the socket is dereferenced, so it can still crash on a
    NULL socket (sketched after this list). Fix from Basil Gunn.

10) Action GET operations in the packet scheduler can erroneously bump
    the reference count of the entry, making it unreleasable (see the
    sketch after this list). Fix from Jamal Hadi Salim, whose commit
    message gives a great set of example command lines that trigger
    the bug.

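For item 2, a minimal sketch of what "forcing SW checksumming" means at the driver level, assuming a hypothetical driver (the example_* names and helpers are made up, this is not the r8152 code): ethtool -K clears NETIF_F_RXCSUM, and the ndo_set_features handler has to propagate the cleared bit to the hardware; the reported bug was a condition that never reached the disable path.

#include <linux/netdevice.h>

/* Hypothetical private state and helpers, for illustration only. */
struct example_priv { int dummy; };
static void example_enable_rx_csum(struct example_priv *p) { }
static void example_disable_rx_csum(struct example_priv *p) { }

static int example_set_features(struct net_device *dev,
				netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct example_priv *priv = netdev_priv(dev);

	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			example_enable_rx_csum(priv);
		else
			/* the branch a logic bug can make unreachable */
			example_disable_rx_csum(priv);
	}
	return 0;
}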
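For item 3, a hedged sketch of the kind of change involved: the UDP source and destination ports are copied into the flow key before the route lookup, so that xfrm (IPsec) policy selectors that match on ports can apply. The function name and parameters are illustrative, not the vxlan driver's.

#include <linux/in.h>
#include <net/flow.h>
#include <net/route.h>

static struct rtable *example_tunnel_route(struct net *net, __be32 saddr,
					   __be32 daddr, __be16 sport,
					   __be16 dport)
{
	struct flowi4 fl4;

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_proto = IPPROTO_UDP;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	/* Without these two fields, IPsec policies whose selector
	 * specifies UDP ports never match the tunnel traffic. */
	fl4.fl4_sport = sport;
	fl4.fl4_dport = dport;

	return ip_route_output_key(net, &fl4);
}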
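For item 4, a minimal sketch of the RCU read-side pattern such callbacks need (structure names are invented for illustration, not netvsc's): a pointer published with rcu_assign_pointer() must be picked up under rcu_read_lock() with rcu_dereference() in the data-path callback.

#include <linux/rcupdate.h>

struct example_nvdev { int dummy; /* per-device data-path state */ };

struct example_dev_ctx {
	struct example_nvdev __rcu *nvdev;
};

static void example_channel_callback(struct example_dev_ctx *ctx)
{
	struct example_nvdev *nvdev;

	rcu_read_lock();
	nvdev = rcu_dereference(ctx->nvdev);
	if (nvdev) {
		/* process completions / received packets against nvdev */
	}
	rcu_read_unlock();
}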
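For item 6, a hedged sketch of the general rule the fix restores in fib_dump_info(): if any attribute helper fails midway, the partially built netlink message must be cancelled before returning, otherwise userspace receives a truncated dump. The attribute choices below are illustrative.

#include <net/netlink.h>
#include <linux/rtnetlink.h>

static int example_fill_route(struct sk_buff *skb, u32 portid, u32 seq)
{
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, 0, 0);
	if (!nlh)
		return -EMSGSIZE;

	if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
		goto nla_put_failure;
	/* ... every later attribute (including the encap nest) takes
	 * the same error exit on failure ... */

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);	/* roll back the partial message */
	return -EMSGSIZE;
}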
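For item 7, a hedged sketch of the general rule: USB control-transfer buffers may be DMA-mapped, so they must come from the heap rather than the caller's stack. The vendor request number and register layout below are invented for illustration and are not atusb's.

#include <linux/slab.h>
#include <linux/usb.h>

/* Made-up vendor request number, for illustration only. */
#define EXAMPLE_READ_REG	0x01

static int example_read_reg(struct usb_device *udev, u16 reg, u8 *value)
{
	u8 *buf;
	int ret;

	/* The transfer buffer may be DMA-mapped, so it must be
	 * heap-allocated, never a stack variable. */
	buf = kmalloc(1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      EXAMPLE_READ_REG,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, reg, buf, 1, 1000);
	if (ret >= 0)
		*value = *buf;

	kfree(buf);
	return ret < 0 ? ret : 0;
}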
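For item 8, a hedged sketch of keeping checksum state consistent when an action rewrites a packet field (here an IPv4 address in a TCP flow, as NAT driven from a conntrack action would); the actual openvswitch patch differs, but the kernel's inet_proto_csum_replace4() helper illustrates the rule: it adjusts the transport checksum and, for CHECKSUM_COMPLETE skbs, skb->csum as well.

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>

static void example_nat_rewrite(struct sk_buff *skb, struct tcphdr *th,
				__be32 old_addr, __be32 new_addr)
{
	/* 'true': the address is part of the TCP pseudo-header, so the
	 * transport checksum (and skb->csum when CHECKSUM_COMPLETE)
	 * both need the incremental update. */
	inet_proto_csum_replace4(&th->check, skb, old_addr, new_addr, true);
}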
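For item 9, a minimal sketch of the guard in question, with made-up type and helper names rather than the real ax25 ones: the protocol control block can outlive its socket, so every dereference of ->sk needs the NULL check, not just the first one.

#include <net/sock.h>
#include <net/tcp_states.h>

struct example_cb {
	struct sock *sk;	/* may already be NULL on disconnect */
	/* ... protocol timers and queues ... */
};

static void example_disconnect(struct example_cb *cb, int reason)
{
	/* timers and queues are torn down regardless of the socket */

	if (!cb->sk)		/* the check that must guard *every* use */
		return;

	bh_lock_sock(cb->sk);
	cb->sk->sk_state = TCP_CLOSE;
	cb->sk->sk_err = reason;
	cb->sk->sk_state_change(cb->sk);
	bh_unlock_sock(cb->sk);
}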
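For item 10, a hedged sketch (illustrative names, a plain atomic_t for the count) of the invariant the fix restores: a GET is read-only, so the reference taken for the lookup must be dropped once the reply has been built; if the bind path's reference is also charged here, the count only ever grows and the action can never be deleted.

#include <linux/atomic.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

struct example_action {
	atomic_t refcnt;
	/* ... action state ... */
};

/* Stand-ins for the real dump/free routines. */
static int example_dump_action(struct sk_buff *reply, struct example_action *a)
{
	return 0;
}

static void example_free_action(struct example_action *a)
{
	kfree(a);
}

static int example_get_action(struct example_action *a, struct sk_buff *reply)
{
	/* the lookup already took a reference, so 'a' cannot vanish */
	int err = example_dump_action(reply, a);

	/* GET is read-only: always give that reference back */
	if (atomic_dec_and_test(&a->refcnt))
		example_free_action(a);

	return err;
}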
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (46 commits)
  net sched actions: fix refcnt when GETing of action after bind
  net/mlx4_core: Eliminate warning messages for SRQ_LIMIT under SRIOV
  net/mlx4_core: Fix when to save some qp context flags for dynamic VST to VGT transitions
  net/mlx4_core: Fix racy CQ (Completion Queue) free
  net: stmmac: don't use netdev_[dbg, info, ..] before net_device is registered
  net/mlx5e: Fix a -Wmaybe-uninitialized warning
  ax25: Fix segfault after sock connection timeout
  bpf: rework prog_digest into prog_tag
  tipc: allocate user memory with GFP_KERNEL flag
  net: phy: dp83867: allow RGMII_TXID/RGMII_RXID interface types
  ip6_tunnel: Account for tunnel header in tunnel MTU
  mld: do not remove mld souce list info when set link down
  be2net: fix MAC addr setting on privileged BE3 VFs
  be2net: don't delete MAC on close on unprivileged BE3 VFs
  be2net: fix status check in be_cmd_pmac_add()
  cpmac: remove hopeless #warning
  ravb: do not use zero-length alignment DMA descriptor
  mlx4: do not call napi_schedule() without care
  openvswitch: maintain correct checksum state in conntrack actions
  tcp: fix tcp_fastopen unaligned access complaints on sparc
  ...
Linus Torvalds committed Jan 17, 2017
2 parents 203f80f + 0faa9cb commit 4b19a9e
Showing 60 changed files with 384 additions and 210 deletions.
6 changes: 4 additions & 2 deletions Documentation/devicetree/bindings/net/ti,dp83867.txt
@@ -3,9 +3,11 @@
Required properties:
- reg - The ID number for the phy, usually a small integer
- ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h
for applicable values
for applicable values. Required only if interface type is
PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_RXID
- ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h
for applicable values
for applicable values. Required only if interface type is
PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_TXID
- ti,fifo-depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h
for applicable values

25 changes: 18 additions & 7 deletions drivers/net/ethernet/broadcom/bcmsysport.c
@@ -710,11 +710,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
unsigned int pkts_compl = 0, bytes_compl = 0;
struct bcm_sysport_cb *cb;
struct netdev_queue *txq;
u32 hw_ind;

txq = netdev_get_tx_queue(ndev, ring->index);

/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
@@ -745,9 +742,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,

ring->c_index = c_index;

if (netif_tx_queue_stopped(txq) && pkts_compl)
netif_tx_wake_queue(txq);

netif_dbg(priv, tx_done, ndev,
"ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
ring->index, ring->c_index, pkts_compl, bytes_compl);
@@ -759,16 +753,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
struct netdev_queue *txq;
unsigned int released;
unsigned long flags;

txq = netdev_get_tx_queue(priv->netdev, ring->index);

spin_lock_irqsave(&ring->lock, flags);
released = __bcm_sysport_tx_reclaim(priv, ring);
if (released)
netif_tx_wake_queue(txq);

spin_unlock_irqrestore(&ring->lock, flags);

return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
unsigned long flags;

spin_lock_irqsave(&ring->lock, flags);
__bcm_sysport_tx_reclaim(priv, ring);
spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
struct bcm_sysport_tx_ring *ring =
@@ -1252,7 +1263,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
napi_disable(&ring->napi);
netif_napi_del(&ring->napi);

bcm_sysport_tx_reclaim(priv, ring);
bcm_sysport_tx_clean(priv, ring);

kfree(ring->cbs);
ring->cbs = NULL;
11 changes: 6 additions & 5 deletions drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -47,8 +47,9 @@ struct lmac {
struct bgx {
u8 bgx_id;
struct lmac lmac[MAX_LMAC_PER_BGX];
int lmac_count;
u8 lmac_count;
u8 max_lmac;
u8 acpi_lmac_idx;
void __iomem *reg_base;
struct pci_dev *pdev;
bool is_dlm;
@@ -1143,13 +1144,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
if (acpi_bus_get_device(handle, &adev))
goto out;

acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);

SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);
SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);

bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
bgx->acpi_lmac_idx++; /* move to next LMAC */
out:
bgx->lmac_count++;
return AE_OK;
}

2 changes: 1 addition & 1 deletion drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
err:
mutex_unlock(&adapter->mcc_lock);

if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;

return status;
18 changes: 15 additions & 3 deletions drivers/net/ethernet/emulex/benet/be_main.c
@@ -318,6 +318,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
return 0;

/* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC
* address
*/
if (BEx_chip(adapter) && be_virtfn(adapter) &&
!check_privilege(adapter, BE_PRIV_FILTMGMT))
return -EPERM;

/* if device is not running, copy MAC to netdev->dev_addr */
if (!netif_running(netdev))
goto done;
@@ -3609,7 +3616,11 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)

static void be_disable_if_filters(struct be_adapter *adapter)
{
be_dev_mac_del(adapter, adapter->pmac_id[0]);
/* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
check_privilege(adapter, BE_PRIV_FILTMGMT))
be_dev_mac_del(adapter, adapter->pmac_id[0]);

be_clear_uc_list(adapter);
be_clear_mc_list(adapter);

@@ -3762,8 +3773,9 @@ static int be_enable_if_filters(struct be_adapter *adapter)
if (status)
return status;

/* For BE3 VFs, the PF programs the initial MAC address */
if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
/* Don't add MAC on BE3 VFs without FILTMGMT privilege */
if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
check_privilege(adapter, BE_PRIV_FILTMGMT)) {
status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
if (status)
return status;
38 changes: 20 additions & 18 deletions drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
struct mlx4_cq *cq;

rcu_read_lock();
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
rcu_read_unlock();

if (!cq) {
mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}

/* Acessing the CQ outside of rcu_read_lock is safe, because
* the CQ is freed only after interrupt handling is completed.
*/
++cq->arm_sn;

cq->comp(cq);
@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
struct mlx4_cq *cq;

spin_lock(&cq_table->lock);

rcu_read_lock();
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
if (cq)
atomic_inc(&cq->refcount);

spin_unlock(&cq_table->lock);
rcu_read_unlock();

if (!cq) {
mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
return;
}

/* Acessing the CQ outside of rcu_read_lock is safe, because
* the CQ is freed only after interrupt handling is completed.
*/
cq->event(cq, event_type);

if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
}

static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
if (err)
return err;

spin_lock_irq(&cq_table->lock);
spin_lock(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
spin_unlock_irq(&cq_table->lock);
spin_unlock(&cq_table->lock);
if (err)
goto err_icm;

@@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
return 0;

err_radix:
spin_lock_irq(&cq_table->lock);
spin_lock(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
spin_unlock_irq(&cq_table->lock);
spin_unlock(&cq_table->lock);

err_icm:
mlx4_cq_free_icm(dev, cq->cqn);
@@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

spin_lock(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
spin_unlock(&cq_table->lock);

synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);

spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
spin_unlock_irq(&cq_table->lock);

if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
5 changes: 4 additions & 1 deletion drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1748,8 +1748,11 @@ int mlx4_en_start_port(struct net_device *dev)
/* Process all completions if exist to prevent
* the queues freezing if they are full
*/
for (i = 0; i < priv->rx_ring_num; i++)
for (i = 0; i < priv->rx_ring_num; i++) {
local_bh_disable();
napi_schedule(&priv->rx_cq[i]->napi);
local_bh_enable();
}

netif_tx_start_all_queues(dev);
netif_device_attach(dev);
23 changes: 14 additions & 9 deletions drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
break;

case MLX4_EVENT_TYPE_SRQ_LIMIT:
mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
__func__);
mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
__func__, be32_to_cpu(eqe->event.srq.srqn),
eq->eqn);
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
@@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eq->eqn, eq->cons_index, ret);
break;
}
mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
__func__, slave,
be32_to_cpu(eqe->event.srq.srqn),
eqe->type, eqe->subtype);
if (eqe->type ==
MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
__func__, slave,
be32_to_cpu(eqe->event.srq.srqn),
eqe->type, eqe->subtype);

if (!ret && slave != dev->caps.function) {
mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
__func__, eqe->type,
eqe->subtype, slave);
if (eqe->type ==
MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
__func__, eqe->type,
eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
}
5 changes: 3 additions & 2 deletions drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2980,6 +2980,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
put_res(dev, slave, srqn, RES_SRQ);
qp->srq = srq;
}

/* Save param3 for dynamic changes from VST back to VGT */
qp->param3 = qpc->param3;
put_res(dev, slave, rcqn, RES_CQ);
put_res(dev, slave, mtt_base, RES_MTT);
res_end_move(dev, slave, RES_QP, qpn);
@@ -3772,7 +3775,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
u8 orig_sched_queue;
__be32 orig_param3 = qpc->param3;
u8 orig_vlan_control = qpc->pri_path.vlan_control;
u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
u8 orig_pri_path_fl = qpc->pri_path.fl;
@@ -3814,7 +3816,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
*/
if (!err) {
qp->sched_queue = orig_sched_queue;
qp->param3 = orig_param3;
qp->vlan_control = orig_vlan_control;
qp->fvl_rx = orig_fvl_rx;
qp->pri_path_fl = orig_pri_path_fl;
11 changes: 7 additions & 4 deletions drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -668,9 +668,12 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
int ttl;

#if IS_ENABLED(CONFIG_INET)
int ret;

rt = ip_route_output_key(dev_net(mirred_dev), fl4);
if (IS_ERR(rt))
return PTR_ERR(rt);
ret = PTR_ERR_OR_ZERO(rt);
if (ret)
return ret;
#else
return -EOPNOTSUPP;
#endif
@@ -741,8 +744,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
struct flowi4 fl4 = {};
char *encap_header;
int encap_size;
__be32 saddr = 0;
int ttl = 0;
__be32 saddr;
int ttl;
int err;

encap_header = kzalloc(max_encap_size, GFP_KERNEL);
8 changes: 4 additions & 4 deletions drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -209,21 +209,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
/* pci_eqe_cmd_token
* Command completion event - token
*/
MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);

/* pci_eqe_cmd_status
* Command completion event - status
*/
MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);

/* pci_eqe_cmd_out_param_h
* Command completion event - output parameter - higher part
*/
MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);

/* pci_eqe_cmd_out_param_l
* Command completion event - output parameter - lower part
*/
MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);

#endif
1 change: 1 addition & 0 deletions drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -684,6 +684,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
dev_kfree_skb_any(skb_orig);
return NETDEV_TX_OK;
}
dev_consume_skb_any(skb_orig);
}

if (eth_skb_pad(skb)) {
1 change: 1 addition & 0 deletions drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -345,6 +345,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
dev_kfree_skb_any(skb_orig);
return NETDEV_TX_OK;
}
dev_consume_skb_any(skb_orig);
}
mlxsw_sx_txhdr_construct(skb, &tx_info);
/* TX header is consumed by HW on the way so we shouldn't count its
Expand Down
(Diffs for the remaining changed files are not shown here.)
