Skip to content

Commit

Permalink
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Browse files Browse the repository at this point in the history
Pull networking fixes from David Miller:

 1) Fix verifier memory corruption and other bugs in BPF layer, from
    Alexei Starovoitov.

 2) Add a conservative fix for doing BPF properly in the BPF classifier
    of the packet scheduler on ingress.  Also from Alexei.

 3) The SKB scrubber should not clear out the packet MARK and security
    label, from Herbert Xu.

 4) Fix oops on rmmod in stmmac driver, from Bryan O'Donoghue.

 5) Pause handling is not correct in the stmmac driver because it
    doesn't take into consideration the RX and TX fifo sizes.  From
    Vince Bridgers.

 6) Failure path missing unlock in FOU driver, from Wang Cong.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (44 commits)
  net: dsa: use DEVICE_ATTR_RW to declare temp1_max
  netns: remove BUG_ONs from net_generic()
  IB/ipoib: Fix ndo_get_iflink
  sfc: Fix memcpy() with const destination compiler warning.
  altera tse: Fix network-delays and -retransmissions after high throughput.
  net: remove unused 'dev' argument from netif_needs_gso()
  act_mirred: Fix bogus header when redirecting from VLAN
  inet_diag: fix access to tcp cc information
  tcp: tcp_get_info() should fetch socket fields once
  net: dsa: mv88e6xxx: Add missing initialization in mv88e6xxx_set_port_state()
  skbuff: Do not scrub skb mark within the same name space
  Revert "net: Reset secmark when scrubbing packet"
  bpf: fix two bugs in verification logic when accessing 'ctx' pointer
  bpf: fix bpf helpers to use skb->mac_header relative offsets
  stmmac: Configure Flow Control to work correctly based on rxfifo size
  stmmac: Enable unicast pause frame detect in GMAC Register 6
  stmmac: Read tx-fifo-depth and rx-fifo-depth from the devicetree
  stmmac: Add defines and documentation for enabling flow control
  stmmac: Add properties for transmit and receive fifo sizes
  stmmac: fix oops on rmmod after assigning ip addr
  ...
  • Loading branch information
Linus Torvalds committed Apr 17, 2015
2 parents e2fdae7 + e3122b7 commit 388f997
Show file tree
Hide file tree
Showing 62 changed files with 656 additions and 352 deletions.
6 changes: 6 additions & 0 deletions Documentation/devicetree/bindings/net/ethernet.txt
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,12 @@ The following properties are common to the Ethernet controllers:
- phy: the same as "phy-handle" property, not recommended for new bindings.
- phy-device: the same as "phy-handle" property, not recommended for new
bindings.
- rx-fifo-depth: the size of the controller's receive fifo in bytes. This
is used for components that can have configurable receive fifo sizes,
and is useful for determining certain configuration settings such as
flow control thresholds.
- tx-fifo-depth: the size of the controller's transmit fifo in bytes. This
is used for components that can have configurable fifo sizes.

Child nodes of the Ethernet controller are typically the individual PHY devices
connected via the MDIO bus (sometimes the MDIO bus controller is separate).
Expand Down
4 changes: 4 additions & 0 deletions Documentation/devicetree/bindings/net/stmmac.txt
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,8 @@ Optional properties:
If not passed then the system clock will be used and this is fine on some
platforms.
- snps,burst_len: The AXI burst length value of the AXI BUS MODE register.
- tx-fifo-depth: See ethernet.txt file in the same directory
- rx-fifo-depth: See ethernet.txt file in the same directory

Examples:

Expand All @@ -59,6 +61,8 @@ Examples:
phy-mode = "gmii";
snps,multicast-filter-bins = <256>;
snps,perfect-filter-entries = <128>;
rx-fifo-depth = <16384>;
tx-fifo-depth = <16384>;
clocks = <&clock>;
clock-names = "stmmaceth";
};
2 changes: 1 addition & 1 deletion drivers/infiniband/hw/cxgb4/mem.c
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
c4iw_init_wr_wait(&wr_wait);
wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);

skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
skb = alloc_skb(wr_len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
Expand Down
5 changes: 5 additions & 0 deletions drivers/infiniband/ulp/ipoib/ipoib_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -846,6 +846,11 @@ static int ipoib_get_iflink(const struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);

/* parent interface */
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
return dev->ifindex;

/* child/vlan interface */
return priv->parent->ifindex;
}

Expand Down
3 changes: 1 addition & 2 deletions drivers/infiniband/ulp/ipoib/ipoib_vlan.c
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,7 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
/* MTU will be reset when mcast join happens */
priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
priv->parent = ppriv->dev;
set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);

result = ipoib_set_dev_features(priv, ppriv->ca);
Expand All @@ -84,8 +85,6 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
goto register_failed;
}

priv->parent = ppriv->dev;

ipoib_create_debug_files(priv->dev);

/* RTNL childs don't need proprietary sysfs entries */
Expand Down
8 changes: 4 additions & 4 deletions drivers/net/dsa/mv88e6xxx.c
Original file line number Diff line number Diff line change
Expand Up @@ -602,8 +602,6 @@ static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
u32 high = 0;

if (s->reg >= 0x100) {
int ret;

ret = mv88e6xxx_reg_read(ds, REG_PORT(port),
s->reg - 0x100);
if (ret < 0)
Expand Down Expand Up @@ -902,14 +900,16 @@ static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int reg, ret;
int reg, ret = 0;
u8 oldstate;

mutex_lock(&ps->smi_mutex);

reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
if (reg < 0)
if (reg < 0) {
ret = reg;
goto abort;
}

oldstate = reg & PORT_CONTROL_STATE_MASK;
if (oldstate != state) {
Expand Down
9 changes: 7 additions & 2 deletions drivers/net/ethernet/altera/altera_tse_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -376,8 +376,13 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
u16 pktlength;
u16 pktstatus;

while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
(count < limit)) {
/* Check for count < limit first as get_rx_status is changing
* the response-fifo so we must process the next packet
* after calling get_rx_status if a response is pending.
* (reading the last byte of the response pops the value from the fifo.)
*/
while ((count < limit) &&
((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
pktstatus = rxstatus >> 16;
pktlength = rxstatus & 0xffff;

Expand Down
137 changes: 51 additions & 86 deletions drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
Original file line number Diff line number Diff line change
Expand Up @@ -531,20 +531,8 @@ struct bnx2x_fastpath {
struct napi_struct napi;

#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define BNX2X_FP_STATE_IDLE 0
#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
#define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */
#define BNX2X_FP_STATE_DISABLED (1 << 2)
#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */
#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */
#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
#define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
/* protect state */
spinlock_t lock;
#endif /* CONFIG_NET_RX_BUSY_POLL */
unsigned long busy_poll_state;
#endif

union host_hc_status_block status_blk;
/* chip independent shortcuts into sb structure */
Expand Down Expand Up @@ -619,104 +607,83 @@ struct bnx2x_fastpath {
#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))

#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)

enum bnx2x_fp_state {
BNX2X_STATE_FP_NAPI = BIT(0), /* NAPI handler owns the queue */

BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */
BNX2X_STATE_FP_NAPI_REQ = BIT(1),

BNX2X_STATE_FP_POLL_BIT = 2,
BNX2X_STATE_FP_POLL = BIT(2), /* busy_poll owns the queue */

BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */
};

static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
{
spin_lock_init(&fp->lock);
fp->state = BNX2X_FP_STATE_IDLE;
WRITE_ONCE(fp->busy_poll_state, 0);
}

/* called from the device poll routine to get ownership of a FP */
static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
{
bool rc = true;

spin_lock_bh(&fp->lock);
if (fp->state & BNX2X_FP_LOCKED) {
WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
rc = false;
} else {
/* we don't care if someone yielded */
fp->state = BNX2X_FP_STATE_NAPI;
unsigned long prev, old = READ_ONCE(fp->busy_poll_state);

while (1) {
switch (old) {
case BNX2X_STATE_FP_POLL:
/* make sure bnx2x_fp_lock_poll() won't starve us */
set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT,
&fp->busy_poll_state);
/* fallthrough */
case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ:
return false;
default:
break;
}
prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI);
if (unlikely(prev != old)) {
old = prev;
continue;
}
return true;
}
spin_unlock_bh(&fp->lock);
return rc;
}

/* returns true if someone tried to get the FP while napi had it */
static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
{
bool rc = false;

spin_lock_bh(&fp->lock);
WARN_ON(fp->state &
(BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));

if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
rc = true;

/* state ==> idle, unless currently disabled */
fp->state &= BNX2X_FP_STATE_DISABLED;
spin_unlock_bh(&fp->lock);
return rc;
smp_wmb();
fp->busy_poll_state = 0;
}

/* called from bnx2x_low_latency_poll() */
static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
{
bool rc = true;

spin_lock_bh(&fp->lock);
if ((fp->state & BNX2X_FP_LOCKED)) {
fp->state |= BNX2X_FP_STATE_POLL_YIELD;
rc = false;
} else {
/* preserve yield marks */
fp->state |= BNX2X_FP_STATE_POLL;
}
spin_unlock_bh(&fp->lock);
return rc;
return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0;
}

/* returns true if someone tried to get the FP while it was locked */
static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
{
bool rc = false;

spin_lock_bh(&fp->lock);
WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);

if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
rc = true;

/* state ==> idle, unless currently disabled */
fp->state &= BNX2X_FP_STATE_DISABLED;
spin_unlock_bh(&fp->lock);
return rc;
smp_mb__before_atomic();
clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state);
}

/* true if a socket is polling, even if it did not get the lock */
/* true if a socket is polling */
static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
{
WARN_ON(!(fp->state & BNX2X_FP_OWNED));
return fp->state & BNX2X_FP_USER_PEND;
return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL;
}

/* false if fp is currently owned */
static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
{
int rc = true;

spin_lock_bh(&fp->lock);
if (fp->state & BNX2X_FP_OWNED)
rc = false;
fp->state |= BNX2X_FP_STATE_DISABLED;
spin_unlock_bh(&fp->lock);
set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state);
return !bnx2x_fp_ll_polling(fp);

return rc;
}
#else
static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
{
}

Expand All @@ -725,19 +692,17 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
return true;
}

static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
{
return false;
}

static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
{
return false;
}

static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
{
return false;
}

static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
Expand Down
9 changes: 5 additions & 4 deletions drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
Original file line number Diff line number Diff line change
Expand Up @@ -1849,7 +1849,7 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
int i;

for_each_rx_queue_cnic(bp, i) {
bnx2x_fp_init_lock(&bp->fp[i]);
bnx2x_fp_busy_poll_init(&bp->fp[i]);
napi_enable(&bnx2x_fp(bp, i, napi));
}
}
Expand All @@ -1859,7 +1859,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
int i;

for_each_eth_queue(bp, i) {
bnx2x_fp_init_lock(&bp->fp[i]);
bnx2x_fp_busy_poll_init(&bp->fp[i]);
napi_enable(&bnx2x_fp(bp, i, napi));
}
}
Expand Down Expand Up @@ -3191,9 +3191,10 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
}
}

bnx2x_fp_unlock_napi(fp);

/* Fall out from the NAPI loop if needed */
if (!bnx2x_fp_unlock_napi(fp) &&
!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {

/* No need to update SB for FCoE L2 ring as long as
* it's connected to the default SB and the SB
Expand Down
15 changes: 12 additions & 3 deletions drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -1140,26 +1140,32 @@ static int set_filter_wr(struct adapter *adapter, int fidx)
struct fw_filter_wr *fwr;
unsigned int ftid;

skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
if (!skb)
return -ENOMEM;

/* If the new filter requires loopback Destination MAC and/or VLAN
* rewriting then we need to allocate a Layer 2 Table (L2T) entry for
* the filter.
*/
if (f->fs.newdmac || f->fs.newvlan) {
/* allocate L2T entry for new filter */
f->l2t = t4_l2t_alloc_switching(adapter->l2t);
if (f->l2t == NULL)
if (f->l2t == NULL) {
kfree_skb(skb);
return -EAGAIN;
}
if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
f->fs.eport, f->fs.dmac)) {
cxgb4_l2t_release(f->l2t);
f->l2t = NULL;
kfree_skb(skb);
return -ENOMEM;
}
}

ftid = adapter->tids.ftid_base + fidx;

skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
memset(fwr, 0, sizeof(*fwr));

Expand Down Expand Up @@ -1257,7 +1263,10 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
len = sizeof(*fwr);
ftid = adapter->tids.ftid_base + fidx;

skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return -ENOMEM;

fwr = (struct fw_filter_wr *)__skb_put(skb, len);
t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

Expand Down
Loading

0 comments on commit 388f997

Please sign in to comment.