
Commit

---
yaml
---
r: 343661
b: refs/heads/master
c: 3127e4e
h: refs/heads/master
i:
  343659: 5520491
v: v3
Tatyana Nikolova authored and Roland Dreier committed Nov 22, 2012
1 parent 578a489 commit 9b55963
Showing 17 changed files with 33 additions and 177 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 08ff32352d6ff7083533dc1c25618d42f92ec28e
+refs/heads/master: 3127e4ea54fc023e35adb1d9af29d49c6d582d12
34 changes: 8 additions & 26 deletions trunk/drivers/infiniband/hw/mlx4/cq.c
@@ -66,7 +66,7 @@ static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
 
 static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
 {
-	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
+	return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
 }
 
 static void *get_cqe(struct mlx4_ib_cq *cq, int n)
@@ -77,9 +77,8 @@ static void *get_cqe(struct mlx4_ib_cq *cq, int n)
 static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
 {
 	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
-	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);
 
-	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
+	return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
 		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
 }
 
@@ -100,13 +99,12 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
 {
 	int err;
 
-	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
+	err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
 			     PAGE_SIZE * 2, &buf->buf);
 
 	if (err)
 		goto out;
 
-	buf->entry_size = dev->dev->caps.cqe_size;
 	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
 			    &buf->mtt);
 	if (err)
@@ -122,25 +120,25 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
 	mlx4_mtt_cleanup(dev->dev, &buf->mtt);
 
 err_buf:
-	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);
+	mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
+		      &buf->buf);
 
 out:
 	return err;
 }
 
 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
 {
-	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
+	mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
 }
 
 static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
 			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
 			       u64 buf_addr, int cqe)
 {
 	int err;
-	int cqe_size = dev->dev->caps.cqe_size;
 
-	*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
+	*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
 			    IB_ACCESS_LOCAL_WRITE, 1);
 	if (IS_ERR(*umem))
 		return PTR_ERR(*umem);
@@ -333,23 +331,16 @@ static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
 {
 	struct mlx4_cqe *cqe, *new_cqe;
 	int i;
-	int cqe_size = cq->buf.entry_size;
-	int cqe_inc = cqe_size == 64 ? 1 : 0;
 
 	i = cq->mcq.cons_index;
 	cqe = get_cqe(cq, i & cq->ibcq.cqe);
-	cqe += cqe_inc;
-
 	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
 		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
 					   (i + 1) & cq->resize_buf->cqe);
-		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
-		new_cqe += cqe_inc;
-
+		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
 		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
 			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
 		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
-		cqe += cqe_inc;
 	}
 	++cq->mcq.cons_index;
 }
@@ -447,7 +438,6 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 
 out:
 	mutex_unlock(&cq->resize_mutex);
-
 	return err;
 }
 
@@ -596,9 +586,6 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	if (!cqe)
 		return -EAGAIN;
 
-	if (cq->buf.entry_size == 64)
-		cqe++;
-
 	++cq->mcq.cons_index;
 
 	/*
@@ -820,7 +807,6 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
 	int nfreed = 0;
 	struct mlx4_cqe *cqe, *dest;
 	u8 owner_bit;
-	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;
 
 	/*
 	 * First we need to find the current producer index, so we
@@ -839,16 +825,12 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
 	 */
 	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
 		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
-		cqe += cqe_inc;
-
 		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
 			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
 				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
 			++nfreed;
 		} else if (nfreed) {
 			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
-			dest += cqe_inc;
-
 			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
 			memcpy(dest, cqe, sizeof *cqe);
 			dest->owner_sr_opcode = owner_bit |
27 changes: 5 additions & 22 deletions trunk/drivers/infiniband/hw/mlx4/main.c
@@ -563,24 +563,15 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	struct mlx4_ib_ucontext *context;
-	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
 	struct mlx4_ib_alloc_ucontext_resp resp;
 	int err;
 
 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);
 
-	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
-		resp_v3.qp_tab_size = dev->dev->caps.num_qps;
-		resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
-		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
-	} else {
-		resp.dev_caps = dev->dev->caps.userspace_caps;
-		resp.qp_tab_size = dev->dev->caps.num_qps;
-		resp.bf_reg_size = dev->dev->caps.bf_reg_size;
-		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
-		resp.cqe_size = dev->dev->caps.cqe_size;
-	}
+	resp.qp_tab_size = dev->dev->caps.num_qps;
+	resp.bf_reg_size = dev->dev->caps.bf_reg_size;
+	resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
 
 	context = kmalloc(sizeof *context, GFP_KERNEL);
 	if (!context)
@@ -595,11 +586,7 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
 	INIT_LIST_HEAD(&context->db_page_list);
 	mutex_init(&context->db_page_mutex);
 
-	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
-		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
-	else
-		err = ib_copy_to_udata(udata, &resp, sizeof(resp));
-
+	err = ib_copy_to_udata(udata, &resp, sizeof resp);
 	if (err) {
 		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
 		kfree(context);
@@ -1355,11 +1342,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
 	ibdev->ib_dev.dma_device = &dev->pdev->dev;
 
-	if (dev->caps.userspace_caps)
-		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
-	else
-		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
-
+	ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
 	ibdev->ib_dev.uverbs_cmd_mask =
 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
 		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
1 change: 0 additions & 1 deletion trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -90,7 +90,6 @@ struct mlx4_ib_xrcd {
 struct mlx4_ib_cq_buf {
 	struct mlx4_buf buf;
 	struct mlx4_mtt mtt;
-	int entry_size;
 };
 
 struct mlx4_ib_cq_resize {
12 changes: 1 addition & 11 deletions trunk/drivers/infiniband/hw/mlx4/user.h
@@ -40,9 +40,7 @@
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-
-#define MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION 3
-#define MLX4_IB_UVERBS_ABI_VERSION 4
+#define MLX4_IB_UVERBS_ABI_VERSION 3
 
 /*
  * Make sure that all structs defined in this file remain laid out so
@@ -52,18 +50,10 @@
  * instead.
  */
 
-struct mlx4_ib_alloc_ucontext_resp_v3 {
-	__u32 qp_tab_size;
-	__u16 bf_reg_size;
-	__u16 bf_regs_per_page;
-};
-
 struct mlx4_ib_alloc_ucontext_resp {
-	__u32 dev_caps;
 	__u32 qp_tab_size;
 	__u16 bf_reg_size;
 	__u16 bf_regs_per_page;
-	__u32 cqe_size;
 };
 
 struct mlx4_ib_alloc_pd_resp {
2 changes: 1 addition & 1 deletion trunk/drivers/infiniband/hw/nes/nes_mgt.c
@@ -447,7 +447,7 @@ static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
 	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX,
 			    lower_32_bits(u64tmp));
 	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_HIGH_IDX,
-			    upper_32_bits(u64tmp >> 32));
+			    upper_32_bits(u64tmp));
 
 	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
 			    lower_32_bits(fpdu_info->frags[0].physaddr));
2 changes: 1 addition & 1 deletion trunk/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1755,7 +1755,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
 			spin_lock_init(&s_state->lock);
 		}
 
-		memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
+		memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
 		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
 		INIT_WORK(&priv->mfunc.master.comm_work,
 			  mlx4_master_comm_channel);
2 changes: 1 addition & 1 deletion trunk/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -51,7 +51,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 	int err;
 
 	cq->size = entries;
-	cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
+	cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
 
 	cq->ring = ring;
 	cq->is_tx = mode;
1 change: 0 additions & 1 deletion trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1600,7 +1600,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 		goto out;
 	}
 	priv->rx_ring_num = prof->rx_ring_num;
-	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
 	priv->mac_index = -1;
 	priv->msg_enable = MLX4_EN_MSG_LEVEL;
 	spin_lock_init(&priv->stats_lock);
5 changes: 2 additions & 3 deletions trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -566,7 +566,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	struct ethhdr *ethh;
 	dma_addr_t dma;
 	u64 s_mac;
-	int factor = priv->cqe_factor;
 
 	if (!priv->port_up)
 		return 0;
@@ -575,7 +574,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	 * descriptor offset can be deduced from the CQE index instead of
 	 * reading 'cqe->index' */
 	index = cq->mcq.cons_index & ring->size_mask;
-	cqe = &cq->buf[(index << factor) + factor];
+	cqe = &cq->buf[index];
 
 	/* Process all completed CQEs */
 	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
@@ -710,7 +709,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 
 		++cq->mcq.cons_index;
 		index = (cq->mcq.cons_index) & ring->size_mask;
-		cqe = &cq->buf[(index << factor) + factor];
+		cqe = &cq->buf[index];
 		if (++polled == budget) {
 			/* We are here because we reached the NAPI budget -
 			 * flush only pending LRO sessions */
5 changes: 2 additions & 3 deletions trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -315,13 +315,12 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 	struct mlx4_cqe *buf = cq->buf;
 	u32 packets = 0;
 	u32 bytes = 0;
-	int factor = priv->cqe_factor;
 
 	if (!priv->port_up)
 		return;
 
 	index = cons_index & size_mask;
-	cqe = &buf[(index << factor) + factor];
+	cqe = &buf[index];
 	ring_index = ring->cons & size_mask;
 
 	/* Process all completed CQEs */
@@ -350,7 +349,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 
 		++cons_index;
 		index = cons_index & size_mask;
-		cqe = &buf[(index << factor) + factor];
+		cqe = &buf[index];
 	}
 
 
26 changes: 9 additions & 17 deletions trunk/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -101,21 +101,15 @@ static void eq_set_ci(struct mlx4_eq *eq, int req_not)
 	mb();
 }
 
-static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor)
+static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
 {
-	/* (entry & (eq->nent - 1)) gives us a cyclic array */
-	unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor);
-	/* CX3 is capable of extending the EQE from 32 to 64 bytes.
-	 * When this feature is enabled, the first (in the lower addresses)
-	 * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes
-	 * contain the legacy EQE information.
-	 */
-	return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
+	unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
+	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
 }
 
-static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor)
+static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
 {
-	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor);
+	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
 	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
 }
 
@@ -183,7 +177,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
 		return;
 	}
 
-	memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
+	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
 	s_eqe->slave_id = slave;
 	/* ensure all information is written before setting the ownersip bit */
 	wmb();
@@ -447,7 +441,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 	int i;
 	enum slave_port_gen_event gen_event;
 
-	while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) {
+	while ((eqe = next_eqe_sw(eq))) {
 		/*
 		 * Make sure we read EQ entry contents after we've
 		 * checked the ownership bit.
@@ -870,8 +864,7 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
 
 	eq->dev = dev;
 	eq->nent = roundup_pow_of_two(max(nent, 2));
-	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
-	npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor)) / PAGE_SIZE;
+	npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;
 
 	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
 				GFP_KERNEL);
@@ -973,9 +966,8 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cmd_mailbox *mailbox;
 	int err;
+	int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
 	int i;
-	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
-	int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor) * eq->nent) / PAGE_SIZE;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
