diff --git a/[refs] b/[refs]
index 87585bb1a7f3..4389ffa967d0 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 08ff32352d6ff7083533dc1c25618d42f92ec28e
+refs/heads/master: 3127e4ea54fc023e35adb1d9af29d49c6d582d12
diff --git a/trunk/drivers/infiniband/hw/mlx4/cq.c b/trunk/drivers/infiniband/hw/mlx4/cq.c
index ae67df35dd4d..c9eb6a6815ce 100644
--- a/trunk/drivers/infiniband/hw/mlx4/cq.c
+++ b/trunk/drivers/infiniband/hw/mlx4/cq.c
@@ -66,7 +66,7 @@ static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
 
 static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
 {
-	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
+	return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
 }
 
 static void *get_cqe(struct mlx4_ib_cq *cq, int n)
@@ -77,9 +77,8 @@ static void *get_cqe(struct mlx4_ib_cq *cq, int n)
 static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
 {
 	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
-	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);
 
-	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
+	return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
 		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
 }
 
@@ -100,13 +99,12 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
 {
 	int err;
 
-	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
+	err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
 			     PAGE_SIZE * 2, &buf->buf);
 
 	if (err)
 		goto out;
 
-	buf->entry_size = dev->dev->caps.cqe_size;
 	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
 			    &buf->mtt);
 	if (err)
@@ -122,7 +120,8 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
 	mlx4_mtt_cleanup(dev->dev, &buf->mtt);
 
 err_buf:
-	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);
+	mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
+		      &buf->buf);
 
 out:
 	return err;
@@ -130,7 +129,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
 
 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
 {
-	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
+	mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
 }
 
 static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
@@ -138,9 +137,8 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
 			       u64 buf_addr, int cqe)
 {
 	int err;
-	int cqe_size = dev->dev->caps.cqe_size;
 
-	*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
+	*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
 			    IB_ACCESS_LOCAL_WRITE, 1);
 	if (IS_ERR(*umem))
 		return PTR_ERR(*umem);
@@ -333,23 +331,16 @@ static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
 {
 	struct mlx4_cqe *cqe, *new_cqe;
 	int i;
-	int cqe_size = cq->buf.entry_size;
-	int cqe_inc = cqe_size == 64 ? 1 : 0;
 
 	i = cq->mcq.cons_index;
 	cqe = get_cqe(cq, i & cq->ibcq.cqe);
-	cqe += cqe_inc;
-
 	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
 		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
 					   (i + 1) & cq->resize_buf->cqe);
-		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
-		new_cqe += cqe_inc;
-
+		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
 		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
 			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
 		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
-		cqe += cqe_inc;
 	}
 	++cq->mcq.cons_index;
 }
@@ -447,7 +438,6 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 
 out:
 	mutex_unlock(&cq->resize_mutex);
-
 	return err;
 }
 
@@ -596,9 +586,6 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	if (!cqe)
 		return -EAGAIN;
 
-	if (cq->buf.entry_size == 64)
-		cqe++;
-
 	++cq->mcq.cons_index;
 
 	/*
@@ -820,7 +807,6 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
 	int nfreed = 0;
 	struct mlx4_cqe *cqe, *dest;
 	u8 owner_bit;
-	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;
 
 	/*
 	 * First we need to find the current producer index, so we
@@ -839,16 +825,12 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
 	 */
 	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
 		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
-		cqe += cqe_inc;
-
 		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
 			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
 				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
 			++nfreed;
 		} else if (nfreed) {
 			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
-			dest += cqe_inc;
-
 			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
 			memcpy(dest, cqe, sizeof *cqe);
 			dest->owner_sr_opcode = owner_bit |
diff --git a/trunk/drivers/infiniband/hw/mlx4/main.c b/trunk/drivers/infiniband/hw/mlx4/main.c
index e7d81c0d1ac5..718ec6b2bad2 100644
--- a/trunk/drivers/infiniband/hw/mlx4/main.c
+++ b/trunk/drivers/infiniband/hw/mlx4/main.c
@@ -563,24 +563,15 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
 {
 	struct mlx4_ib_dev *dev = to_mdev(ibdev);
 	struct mlx4_ib_ucontext *context;
-	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
 	struct mlx4_ib_alloc_ucontext_resp resp;
 	int err;
 
 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);
 
-	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
-		resp_v3.qp_tab_size      = dev->dev->caps.num_qps;
-		resp_v3.bf_reg_size      = dev->dev->caps.bf_reg_size;
-		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
-	} else {
-		resp.dev_caps         = dev->dev->caps.userspace_caps;
-		resp.qp_tab_size      = dev->dev->caps.num_qps;
-		resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
-		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
-		resp.cqe_size         = dev->dev->caps.cqe_size;
-	}
+	resp.qp_tab_size      = dev->dev->caps.num_qps;
+	resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
+	resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
 
 	context = kmalloc(sizeof *context, GFP_KERNEL);
 	if (!context)
@@ -595,11 +586,7 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
 	INIT_LIST_HEAD(&context->db_page_list);
 	mutex_init(&context->db_page_mutex);
 
-	if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
-		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
-	else
-		err = ib_copy_to_udata(udata, &resp, sizeof(resp));
-
+	err = ib_copy_to_udata(udata, &resp, sizeof resp);
 	if (err) {
 		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
 		kfree(context);
@@ -1355,11 +1342,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->ib_dev.num_comp_vectors	= dev->caps.num_comp_vectors;
 	ibdev->ib_dev.dma_device	= &dev->pdev->dev;
 
-	if (dev->caps.userspace_caps)
-		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
-	else
-		ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
-
+	ibdev->ib_dev.uverbs_abi_ver	= MLX4_IB_UVERBS_ABI_VERSION;
 	ibdev->ib_dev.uverbs_cmd_mask	=
 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
 		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
diff --git a/trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h b/trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h
index dcd845bc30f0..e04cbc9a54a5 100644
--- a/trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -90,7 +90,6 @@ struct mlx4_ib_xrcd {
 struct mlx4_ib_cq_buf {
 	struct mlx4_buf		buf;
 	struct mlx4_mtt		mtt;
-	int			entry_size;
 };
 
 struct mlx4_ib_cq_resize {
diff --git a/trunk/drivers/infiniband/hw/mlx4/user.h b/trunk/drivers/infiniband/hw/mlx4/user.h
index 07e6769ef43b..13beedeeef9f 100644
--- a/trunk/drivers/infiniband/hw/mlx4/user.h
+++ b/trunk/drivers/infiniband/hw/mlx4/user.h
@@ -40,9 +40,7 @@
  * Increment this value if any changes that break userspace ABI
  * compatibility are made.
  */
-
-#define MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION	3
-#define MLX4_IB_UVERBS_ABI_VERSION		4
+#define MLX4_IB_UVERBS_ABI_VERSION	3
 
 /*
  * Make sure that all structs defined in this file remain laid out so
@@ -52,18 +50,10 @@
  * instead.
  */
 
-struct mlx4_ib_alloc_ucontext_resp_v3 {
-	__u32	qp_tab_size;
-	__u16	bf_reg_size;
-	__u16	bf_regs_per_page;
-};
-
 struct mlx4_ib_alloc_ucontext_resp {
-	__u32	dev_caps;
 	__u32	qp_tab_size;
 	__u16	bf_reg_size;
 	__u16	bf_regs_per_page;
-	__u32	cqe_size;
 };
 
 struct mlx4_ib_alloc_pd_resp {
diff --git a/trunk/drivers/infiniband/hw/nes/nes_mgt.c b/trunk/drivers/infiniband/hw/nes/nes_mgt.c
index 3ba7be369452..8cf74fd0c44f 100644
--- a/trunk/drivers/infiniband/hw/nes/nes_mgt.c
+++ b/trunk/drivers/infiniband/hw/nes/nes_mgt.c
@@ -447,7 +447,7 @@ static int forward_fpdus(struct nes_vnic *nesvnic, struct nes_qp *nesqp)
 		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX,
 				    lower_32_bits(u64tmp));
 		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_HIGH_IDX,
-				    upper_32_bits(u64tmp >> 32));
+				    upper_32_bits(u64tmp));
 
 		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_NIC_SQ_WQE_FRAG1_LOW_IDX,
 				    lower_32_bits(fpdu_info->frags[0].physaddr));
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/cmd.c b/trunk/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e791e705f7b1..3d1899ff1076 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1755,7 +1755,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
 			spin_lock_init(&s_state->lock);
 		}
 
-		memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
+		memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
 		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
 		INIT_WORK(&priv->mfunc.master.comm_work,
 			  mlx4_master_comm_channel);
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index b8d0854a7ad1..aa9c2f6cf3c0 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -51,7 +51,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 	int err;
 
 	cq->size = entries;
-	cq->buf_size = cq->size * mdev->dev->caps.cqe_size;
+	cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
 
 	cq->ring = ring;
 	cq->is_tx = mode;
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 93a325669582..edd9cb8d3e1d 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1600,7 +1600,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 		goto out;
 	}
 	priv->rx_ring_num = prof->rx_ring_num;
-	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
 	priv->mac_index = -1;
 	priv->msg_enable = MLX4_EN_MSG_LEVEL;
 	spin_lock_init(&priv->stats_lock);
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 6fa106f6c0ec..5aba5ecdf1e2 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -566,7 +566,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	struct ethhdr *ethh;
 	dma_addr_t dma;
 	u64 s_mac;
-	int factor = priv->cqe_factor;
 
 	if (!priv->port_up)
 		return 0;
@@ -575,7 +574,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 	 * descriptor offset can be deduced from the CQE index instead of
 	 * reading 'cqe->index' */
 	index = cq->mcq.cons_index & ring->size_mask;
-	cqe = &cq->buf[(index << factor) + factor];
+	cqe = &cq->buf[index];
 
 	/* Process all completed CQEs */
 	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
@@ -710,7 +709,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 
 		++cq->mcq.cons_index;
 		index = (cq->mcq.cons_index) & ring->size_mask;
-		cqe = &cq->buf[(index << factor) + factor];
+		cqe = &cq->buf[index];
 		if (++polled == budget) {
 			/* We are here because we reached the NAPI budget -
 			 * flush only pending LRO sessions */
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 25c157abdd92..b35094c590ba 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -315,13 +315,12 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 	struct mlx4_cqe *buf = cq->buf;
 	u32 packets = 0;
 	u32 bytes = 0;
-	int factor = priv->cqe_factor;
 
 	if (!priv->port_up)
 		return;
 
 	index = cons_index & size_mask;
-	cqe = &buf[(index << factor) + factor];
+	cqe = &buf[index];
 	ring_index = ring->cons & size_mask;
 
 	/* Process all completed CQEs */
@@ -350,7 +349,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
 
 		++cons_index;
 		index = cons_index & size_mask;
-		cqe = &buf[(index << factor) + factor];
+		cqe = &buf[index];
 	}
 
 
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/eq.c b/trunk/drivers/net/ethernet/mellanox/mlx4/eq.c
index c509a86db610..b84a88bc44dc 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -101,21 +101,15 @@ static void eq_set_ci(struct mlx4_eq *eq, int req_not)
 	mb();
 }
 
-static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor)
+static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
 {
-	/* (entry & (eq->nent - 1)) gives us a cyclic array */
-	unsigned long offset = (entry & (eq->nent - 1)) * (MLX4_EQ_ENTRY_SIZE << eqe_factor);
-	/* CX3 is capable of extending the EQE from 32 to 64 bytes.
-	 * When this feature is enabled, the first (in the lower addresses)
-	 * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes
-	 * contain the legacy EQE information.
-	 */
-	return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
+	unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
+	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
 }
 
-static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor)
+static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
 {
-	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor);
+	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
 	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
 }
 
@@ -183,7 +177,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
 		return;
 	}
 
-	memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
+	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
 	s_eqe->slave_id = slave;
 	/* ensure all information is written before setting the ownersip bit */
 	wmb();
@@ -447,7 +441,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 	int i;
 	enum slave_port_gen_event gen_event;
 
-	while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor))) {
+	while ((eqe = next_eqe_sw(eq))) {
 		/*
 		 * Make sure we read EQ entry contents after we've
 		 * checked the ownership bit.
@@ -870,8 +864,7 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
 
 	eq->dev   = dev;
 	eq->nent  = roundup_pow_of_two(max(nent, 2));
-	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
-	npages = PAGE_ALIGN(eq->nent * (MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor)) / PAGE_SIZE;
+	npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;
 
 	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
 				GFP_KERNEL);
@@ -973,9 +966,8 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_cmd_mailbox *mailbox;
 	int err;
+	int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
 	int i;
-	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes */
-	int npages = PAGE_ALIGN((MLX4_EQ_ENTRY_SIZE << dev->caps.eqe_factor) * eq->nent) / PAGE_SIZE;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c b/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c
index 9a9de51ecc91..4f30b99324cf 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -110,8 +110,6 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
 		[42] = "Multicast VEP steering support",
 		[48] = "Counters support",
 		[59] = "Port management change event support",
-		[61] = "64 byte EQE support",
-		[62] = "64 byte CQE support",
 	};
 	int i;
 
@@ -237,7 +235,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 		field = dev->caps.num_ports;
 		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
 
-		size = dev->caps.function_caps; /* set PF behaviours */
+		size = 0; /* no PF behaviour is set for now */
 		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
 
 		field = 0; /* protected FMR support not available as yet */
@@ -1239,24 +1237,6 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
 		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
 
-	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
-	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
-		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
-		dev->caps.eqe_size   = 64;
-		dev->caps.eqe_factor = 1;
-	} else {
-		dev->caps.eqe_size   = 32;
-		dev->caps.eqe_factor = 0;
-	}
-
-	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
-		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
-		dev->caps.cqe_size   = 64;
-		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
-	} else {
-		dev->caps.cqe_size   = 32;
-	}
-
 	/* QPC/EEC/CQC/EQC/RDMARC attributes */
 
 	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
@@ -1339,7 +1319,6 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 	struct mlx4_cmd_mailbox *mailbox;
 	__be32 *outbox;
 	int err;
-	u8 byte_field;
 
 #define QUERY_HCA_GLOBAL_CAPS_OFFSET	0x04
 
@@ -1391,13 +1370,6 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
 					 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
 	}
 
-	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
-	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
-	if (byte_field & 0x20) /* 64-bytes eqe enabled */
-		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
-	if (byte_field & 0x40) /* 64-bytes cqe enabled */
-		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;
-
 	/* TPT attributes */
 
 	MLX4_GET(param->dmpt_base,  outbox, INIT_HCA_DMPT_BASE_OFFSET);
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/fw.h b/trunk/drivers/net/ethernet/mellanox/mlx4/fw.h
index 2c2e7ade2a34..85abe9c11a22 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -172,7 +172,6 @@ struct mlx4_init_hca_param {
 	u8  log_uar_sz;
 	u8  uar_page_sz; /* log pg sz in 4k chunks */
 	u8  fs_hash_enable_bits;
-	u64 dev_cap_enabled;
 };
 
 struct mlx4_init_ib_param {
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c
index 4337f685175d..2aa80afd98d2 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -95,14 +95,8 @@ MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
 					 " Not in use with device managed"
 					 " flow steering");
 
-static bool enable_64b_cqe_eqe;
-module_param(enable_64b_cqe_eqe, bool, 0444);
-MODULE_PARM_DESC(enable_64b_cqe_eqe,
-		 "Enable 64 byte CQEs/EQEs when the the FW supports this");
-
 #define HCA_GLOBAL_CAP_MASK            0
-
-#define PF_CONTEXT_BEHAVIOUR_MASK	MLX4_FUNC_CAP_64B_EQE_CQE
+#define PF_CONTEXT_BEHAVIOUR_MASK	0
 
 static char mlx4_version[] __devinitdata =
 	DRV_NAME ": Mellanox ConnectX core driver v"
@@ -392,21 +386,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
 
 	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
-
-	if (!enable_64b_cqe_eqe) {
-		if (dev_cap->flags &
-		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
-			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
-			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
-			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
-		}
-	}
-
-	if ((dev_cap->flags &
-	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
-	    mlx4_is_master(dev))
-		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;
-
 	return 0;
 }
 /*The function checks if there are live vf, return the num of them*/
@@ -620,21 +599,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 		goto err_mem;
 	}
 
-	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
-		dev->caps.eqe_size   = 64;
-		dev->caps.eqe_factor = 1;
-	} else {
-		dev->caps.eqe_size   = 32;
-		dev->caps.eqe_factor = 0;
-	}
-
-	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
-		dev->caps.cqe_size   = 64;
-		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_64B_CQE;
-	} else {
-		dev->caps.cqe_size   = 32;
-	}
-
 	return 0;
 
 err_mem:
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 73b5c2ac5bd5..9d27e42264e2 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -487,7 +487,6 @@ struct mlx4_en_priv {
 	int mac_index;
 	unsigned max_mtu;
 	int base_qpn;
-	int cqe_factor;
 
 	struct mlx4_en_rss_map rss_map;
 	__be32 ctrl_flags;
diff --git a/trunk/include/linux/mlx4/device.h b/trunk/include/linux/mlx4/device.h
index 21821da2abfd..6d1acb04cd17 100644
--- a/trunk/include/linux/mlx4/device.h
+++ b/trunk/include/linux/mlx4/device.h
@@ -142,8 +142,6 @@ enum {
 	MLX4_DEV_CAP_FLAG_COUNTERS	= 1LL << 48,
 	MLX4_DEV_CAP_FLAG_SENSE_SUPPORT	= 1LL << 55,
 	MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
-	MLX4_DEV_CAP_FLAG_64B_EQE	= 1LL << 61,
-	MLX4_DEV_CAP_FLAG_64B_CQE	= 1LL << 62
 };
 
 enum {
@@ -153,20 +151,6 @@ enum {
 	MLX4_DEV_CAP_FLAG2_FS_EN	= 1LL << 3
 };
 
-enum {
-	MLX4_DEV_CAP_64B_EQE_ENABLED	= 1LL << 0,
-	MLX4_DEV_CAP_64B_CQE_ENABLED	= 1LL << 1
-};
-
-enum {
-	MLX4_USER_DEV_CAP_64B_CQE	= 1L << 0
-};
-
-enum {
-	MLX4_FUNC_CAP_64B_EQE_CQE	= 1L << 0
-};
-
-
 #define MLX4_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)
 
 enum {
@@ -435,11 +419,6 @@ struct mlx4_caps {
 	u32			max_counters;
 	u8			port_ib_mtu[MLX4_MAX_PORTS + 1];
 	u16			sqp_demux;
-	u32			eqe_size;
-	u32			cqe_size;
-	u8			eqe_factor;
-	u32			userspace_caps; /* userspace must be aware of these */
-	u32			function_caps; /* VFs must be aware of these */
 };
 
 struct mlx4_buf_list {
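
Note on the indexing scheme this patch removes (commentary, not part of the diff): with the 64-byte CQE/EQE feature, each hardware ring entry was twice the size of the 32-byte struct mlx4_cqe / struct mlx4_eqe, and per the deleted get_eqe() comment the legacy data sits in the upper 32 bytes of each 64-byte slot. The deleted expressions cqe = &buf[(index << factor) + factor], cqe += cqe_inc, and the cqe++ in mlx4_ib_poll_one() all encode that layout. A minimal standalone C sketch of the addressing, with hypothetical names (mlx4_cqe_sketch, cqe_at), is:

/*
 * Sketch only: models the stride/offset arithmetic removed by this revert.
 * factor == 1 models 64-byte entries (legacy data in the upper 32 bytes of
 * each slot); factor == 0 models plain 32-byte entries.
 */
#include <assert.h>
#include <stdint.h>

struct mlx4_cqe_sketch {	/* stand-in for the 32-byte struct mlx4_cqe */
	uint8_t data[32];
};

static struct mlx4_cqe_sketch *cqe_at(struct mlx4_cqe_sketch *buf,
				      unsigned int index, int factor)
{
	/* (index << factor) scales the index to the entry stride in 32-byte
	 * units; "+ factor" then skips the reserved lower half of a 64-byte
	 * entry so the pointer lands on the legacy CQE data. */
	return &buf[(index << factor) + factor];
}

int main(void)
{
	struct mlx4_cqe_sketch ring[8];

	/* 32-byte entries: slot i starts at byte 32 * i */
	assert((char *)cqe_at(ring, 3, 0) - (char *)ring == 3 * 32);
	/* 64-byte entries: slot i's legacy data starts at byte 64 * i + 32 */
	assert((char *)cqe_at(ring, 3, 1) - (char *)ring == 3 * 64 + 32);
	return 0;
}

The same arithmetic explains the EQE side: the removed next_eqe_sw(eq, eqe_factor) path read entries at a (MLX4_EQ_ENTRY_SIZE << eqe_factor) stride, offset by MLX4_EQ_ENTRY_SIZE when the factor was set. After this revert, both CQ and EQ rings are back to fixed 32-byte entries, so plain array indexing by sizeof(struct mlx4_cqe) / MLX4_EQ_ENTRY_SIZE suffices.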