Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 254684
b: refs/heads/master
c: 5318d80
h: refs/heads/master
v: v3
  • Loading branch information
Shreyas Bhatewara authored and David S. Miller committed Jul 6, 2011
1 parent fd71b6c commit 7b902b1
Show file tree
Hide file tree
Showing 3 changed files with 97 additions and 44 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: 44661462ee1ee3c922754fc1f246867f0d01e7ea
refs/heads/master: 5318d809d7b4975ce5e5303e8508f89a5458c2b6
135 changes: 94 additions & 41 deletions trunk/drivers/net/vmxnet3/vmxnet3_drv.c
Original file line number Diff line number Diff line change
Expand Up @@ -575,7 +575,7 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
u32 val;

while (num_allocated < num_to_alloc) {
while (num_allocated <= num_to_alloc) {
struct vmxnet3_rx_buf_info *rbi;
union Vmxnet3_GenericDesc *gd;

Expand Down Expand Up @@ -621,9 +621,15 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,

BUG_ON(rbi->dma_addr == 0);
gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
| val | rbi->len);

/* Fill the last buffer but don't mark it ready, or else the
* device will think that the queue is full */
if (num_allocated == num_to_alloc)
break;

gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
num_allocated++;
vmxnet3_cmd_ring_adv_next2fill(ring);
}
Expand Down Expand Up @@ -1140,6 +1146,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
};
u32 num_rxd = 0;
bool skip_page_frags = false;
struct Vmxnet3_RxCompDesc *rcd;
struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
Expand All @@ -1150,11 +1157,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
&rxComp);
while (rcd->gen == rq->comp_ring.gen) {
struct vmxnet3_rx_buf_info *rbi;
struct sk_buff *skb;
struct sk_buff *skb, *new_skb = NULL;
struct page *new_page = NULL;
int num_to_alloc;
struct Vmxnet3_RxDesc *rxd;
u32 idx, ring_idx;

struct vmxnet3_cmd_ring *ring = NULL;
if (num_rxd >= quota) {
/* we may stop even before we see the EOP desc of
* the current pkt
Expand All @@ -1165,6 +1173,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
idx = rcd->rxdIdx;
ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
ring = rq->rx_ring + ring_idx;
vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
&rxCmdDesc);
rbi = rq->buf_info[ring_idx] + idx;
Expand Down Expand Up @@ -1193,37 +1202,80 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
goto rcd_done;
}

skip_page_frags = false;
ctx->skb = rbi->skb;
rbi->skb = NULL;
new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
if (new_skb == NULL) {
/* Skb allocation failed, do not handover this
* skb to stack. Reuse it. Drop the existing pkt
*/
rq->stats.rx_buf_alloc_failure++;
ctx->skb = NULL;
rq->stats.drop_total++;
skip_page_frags = true;
goto rcd_done;
}

pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
PCI_DMA_FROMDEVICE);

skb_put(ctx->skb, rcd->len);

/* Immediate refill */
new_skb->dev = adapter->netdev;
skb_reserve(new_skb, NET_IP_ALIGN);
rbi->skb = new_skb;
rbi->dma_addr = pci_map_single(adapter->pdev,
rbi->skb->data, rbi->len,
PCI_DMA_FROMDEVICE);
rxd->addr = cpu_to_le64(rbi->dma_addr);
rxd->len = rbi->len;

} else {
BUG_ON(ctx->skb == NULL);
BUG_ON(ctx->skb == NULL && !skip_page_frags);

/* non SOP buffer must be type 1 in most cases */
if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) {
BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);
BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

if (rcd->len) {
pci_unmap_page(adapter->pdev,
rbi->dma_addr, rbi->len,
PCI_DMA_FROMDEVICE);
/* If an SOP buffer was dropped, skip all
* following non-SOP fragments. They will be reused.
*/
if (skip_page_frags)
goto rcd_done;

vmxnet3_append_frag(ctx->skb, rcd, rbi);
rbi->page = NULL;
}
} else {
/*
* The only time a non-SOP buffer is type 0 is
* when it's EOP and error flag is raised, which
* has already been handled.
new_page = alloc_page(GFP_ATOMIC);
if (unlikely(new_page == NULL)) {
/* Replacement page frag could not be allocated.
* Reuse this page. Drop the pkt and free the
* skb which contained this page as a frag. Skip
* processing all the following non-sop frags.
*/
BUG_ON(true);
rq->stats.rx_buf_alloc_failure++;
dev_kfree_skb(ctx->skb);
ctx->skb = NULL;
skip_page_frags = true;
goto rcd_done;
}

if (rcd->len) {
pci_unmap_page(adapter->pdev,
rbi->dma_addr, rbi->len,
PCI_DMA_FROMDEVICE);

vmxnet3_append_frag(ctx->skb, rcd, rbi);
}

/* Immediate refill */
rbi->page = new_page;
rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
rxd->addr = cpu_to_le64(rbi->dma_addr);
rxd->len = rbi->len;
}


skb = ctx->skb;
if (rcd->eop) {
skb->len += skb->data_len;
Expand All @@ -1244,26 +1296,27 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
}

rcd_done:
/* device may skip some rx descs */
rq->rx_ring[ring_idx].next2comp = idx;
VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp,
rq->rx_ring[ring_idx].size);

/* refill rx buffers frequently to avoid starving the h/w */
num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring +
ring_idx);
if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq,
ring_idx, adapter))) {
vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc,
adapter);

/* if needed, update the register */
if (unlikely(rq->shared->updateRxProd)) {
VMXNET3_WRITE_BAR0_REG(adapter,
rxprod_reg[ring_idx] + rq->qid * 8,
rq->rx_ring[ring_idx].next2fill);
rq->uncommitted[ring_idx] = 0;
}
/* device may have skipped some rx descs */
ring->next2comp = idx;
num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
ring = rq->rx_ring + ring_idx;
while (num_to_alloc) {
vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
&rxCmdDesc);
BUG_ON(!rxd->addr);

/* Recv desc is ready to be used by the device */
rxd->gen = ring->gen;
vmxnet3_cmd_ring_adv_next2fill(ring);
num_to_alloc--;
}

/* if needed, update the register */
if (unlikely(rq->shared->updateRxProd)) {
VMXNET3_WRITE_BAR0_REG(adapter,
rxprod_reg[ring_idx] + rq->qid * 8,
ring->next2fill);
rq->uncommitted[ring_idx] = 0;
}

vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
Expand Down
4 changes: 2 additions & 2 deletions trunk/drivers/net/vmxnet3/vmxnet3_int.h
Original file line number Diff line number Diff line change
Expand Up @@ -68,10 +68,10 @@
/*
* Version numbers
*/
#define VMXNET3_DRIVER_VERSION_STRING "1.1.9.0-k"
#define VMXNET3_DRIVER_VERSION_STRING "1.1.14.0-k"

/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
#define VMXNET3_DRIVER_VERSION_NUM 0x01010900
#define VMXNET3_DRIVER_VERSION_NUM 0x01010E00

#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
Expand Down

0 comments on commit 7b902b1

Please sign in to comment.