---
r: 343672
b: refs/heads/master
c: c9b03c1
h: refs/heads/master
v: v3
Bart Van Assche authored and Roland Dreier committed Dec 1, 2012
1 parent bad4ca1 commit 5d72072
Showing 36 changed files with 186 additions and 272 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 78c90247afb786b1a1fbfffbcd42907ce480e36f
refs/heads/master: c9b03c1ae55decc721310b79d8f50d44fbb37dc7
9 changes: 5 additions & 4 deletions trunk/drivers/infiniband/core/cma.c
@@ -345,17 +345,17 @@ static int find_gid_port(struct ib_device *device, union ib_gid *gid, u8 port_nu

err = ib_query_port(device, port_num, &props);
if (err)
return err;
return 1;

for (i = 0; i < props.gid_tbl_len; ++i) {
err = ib_query_gid(device, port_num, i, &tmp);
if (err)
return err;
return 1;
if (!memcmp(&tmp, gid, sizeof tmp))
return 0;
}

return -EADDRNOTAVAIL;
return -EAGAIN;
}

static int cma_acquire_dev(struct rdma_id_private *id_priv)
@@ -388,7 +388,8 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
if (!ret) {
id_priv->id.port_num = port;
goto out;
}
} else if (ret == 1)
break;
}
}
}
1 change: 0 additions & 1 deletion trunk/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -311,7 +311,6 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
if (cq->ibcq.event_handler)
cq->ibcq.event_handler(&ib_event,
cq->ibcq.cq_context);
break;
}

default:
6 changes: 4 additions & 2 deletions trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -128,8 +128,9 @@ static void stop_ep_timer(struct iwch_ep *ep)
{
PDBG("%s ep %p\n", __func__, ep);
if (!timer_pending(&ep->timer)) {
WARN(1, "%s timer stopped when its not running! ep %p state %u\n",
printk(KERN_ERR "%s timer stopped when its not running! ep %p state %u\n",
__func__, ep, ep->com.state);
WARN_ON(1);
return;
}
del_timer_sync(&ep->timer);
@@ -1755,8 +1756,9 @@ static void ep_timeout(unsigned long arg)
__state_set(&ep->com, ABORTING);
break;
default:
WARN(1, "%s unexpected state ep %p state %u\n",
printk(KERN_ERR "%s unexpected state ep %p state %u\n",
__func__, ep, ep->com.state);
WARN_ON(1);
abort = 0;
}
spin_unlock_irqrestore(&ep->com.lock, flags);
6 changes: 4 additions & 2 deletions trunk/drivers/infiniband/hw/cxgb4/cm.c
@@ -151,8 +151,9 @@ static void stop_ep_timer(struct c4iw_ep *ep)
{
PDBG("%s ep %p\n", __func__, ep);
if (!timer_pending(&ep->timer)) {
WARN(1, "%s timer stopped when its not running! "
printk(KERN_ERR "%s timer stopped when its not running! "
"ep %p state %u\n", __func__, ep, ep->com.state);
WARN_ON(1);
return;
}
del_timer_sync(&ep->timer);
@@ -2550,8 +2551,9 @@ static void process_timeout(struct c4iw_ep *ep)
__state_set(&ep->com, ABORTING);
break;
default:
WARN(1, "%s unexpected state ep %p tid %u state %u\n",
printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
__func__, ep, ep->hwtid, ep->com.state);
WARN_ON(1);
abort = 0;
}
mutex_unlock(&ep->com.mutex);
10 changes: 10 additions & 0 deletions trunk/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -718,6 +718,16 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
if (ret)
goto done;

/*
* we ignore most issues after reporting them, but have to specially
* handle hardware-disabled chips.
*/
if (ret == 2) {
/* unique error, known to ipath_init_one */
ret = -EPERM;
goto done;
}

/*
* We could bump this to allow for full rcvegrcnt + rcvtidcnt,
* but then it no longer nicely fits power of two, and since
4 changes: 2 additions & 2 deletions trunk/drivers/infiniband/hw/mlx4/cm.c
@@ -268,15 +268,15 @@ static void schedule_delayed(struct ib_device *ibdev, struct id_map_entry *id)
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
unsigned long flags;

spin_lock(&sriov->id_map_lock);
spin_lock_irqsave(&sriov->going_down_lock, flags);
spin_lock(&sriov->id_map_lock);
/*make sure that there is no schedule inside the scheduled work.*/
if (!sriov->is_going_down) {
id->scheduled_delete = 1;
schedule_delayed_work(&id->timeout, CM_CLEANUP_CACHE_TIMEOUT);
}
spin_unlock_irqrestore(&sriov->going_down_lock, flags);
spin_unlock(&sriov->id_map_lock);
spin_unlock_irqrestore(&sriov->going_down_lock, flags);
}

int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id,
34 changes: 8 additions & 26 deletions trunk/drivers/infiniband/hw/mlx4/cq.c
@@ -66,7 +66,7 @@ static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
@@ -77,9 +77,8 @@ static void *get_cqe(struct mlx4_ib_cq *cq, int n)
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

@@ -100,13 +99,12 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
{
int err;

err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
PAGE_SIZE * 2, &buf->buf);

if (err)
goto out;

buf->entry_size = dev->dev->caps.cqe_size;
err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
&buf->mtt);
if (err)
@@ -122,25 +120,25 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);
mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
&buf->buf);

out:
return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
}

static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
u64 buf_addr, int cqe)
{
int err;
int cqe_size = dev->dev->caps.cqe_size;

*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
*umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
IB_ACCESS_LOCAL_WRITE, 1);
if (IS_ERR(*umem))
return PTR_ERR(*umem);
@@ -333,23 +331,16 @@ static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
struct mlx4_cqe *cqe, *new_cqe;
int i;
int cqe_size = cq->buf.entry_size;
int cqe_inc = cqe_size == 64 ? 1 : 0;

i = cq->mcq.cons_index;
cqe = get_cqe(cq, i & cq->ibcq.cqe);
cqe += cqe_inc;

while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
(i + 1) & cq->resize_buf->cqe);
memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
new_cqe += cqe_inc;

memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
cqe += cqe_inc;
}
++cq->mcq.cons_index;
}
@@ -447,7 +438,6 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)

out:
mutex_unlock(&cq->resize_mutex);

return err;
}

@@ -596,9 +586,6 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
if (!cqe)
return -EAGAIN;

if (cq->buf.entry_size == 64)
cqe++;

++cq->mcq.cons_index;

/*
@@ -820,7 +807,6 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
int nfreed = 0;
struct mlx4_cqe *cqe, *dest;
u8 owner_bit;
int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

/*
* First we need to find the current producer index, so we
@@ -839,16 +825,12 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
*/
while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
cqe += cqe_inc;

if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
++nfreed;
} else if (nfreed) {
dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
dest += cqe_inc;

owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
memcpy(dest, cqe, sizeof *cqe);
dest->owner_sr_opcode = owner_bit |
27 changes: 5 additions & 22 deletions trunk/drivers/infiniband/hw/mlx4/main.c
@@ -563,24 +563,15 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_ucontext *context;
struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
struct mlx4_ib_alloc_ucontext_resp resp;
int err;

if (!dev->ib_active)
return ERR_PTR(-EAGAIN);

if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
resp_v3.qp_tab_size = dev->dev->caps.num_qps;
resp_v3.bf_reg_size = dev->dev->caps.bf_reg_size;
resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
} else {
resp.dev_caps = dev->dev->caps.userspace_caps;
resp.qp_tab_size = dev->dev->caps.num_qps;
resp.bf_reg_size = dev->dev->caps.bf_reg_size;
resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
resp.cqe_size = dev->dev->caps.cqe_size;
}
resp.qp_tab_size = dev->dev->caps.num_qps;
resp.bf_reg_size = dev->dev->caps.bf_reg_size;
resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;

context = kmalloc(sizeof *context, GFP_KERNEL);
if (!context)
@@ -595,11 +586,7 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);

if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
else
err = ib_copy_to_udata(udata, &resp, sizeof(resp));

err = ib_copy_to_udata(udata, &resp, sizeof resp);
if (err) {
mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
kfree(context);
@@ -1355,11 +1342,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
ibdev->ib_dev.dma_device = &dev->pdev->dev;

if (dev->caps.userspace_caps)
ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
else
ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;

ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
ibdev->ib_dev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
1 change: 0 additions & 1 deletion trunk/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -90,7 +90,6 @@ struct mlx4_ib_xrcd {
struct mlx4_ib_cq_buf {
struct mlx4_buf buf;
struct mlx4_mtt mtt;
int entry_size;
};

struct mlx4_ib_cq_resize {
12 changes: 1 addition & 11 deletions trunk/drivers/infiniband/hw/mlx4/user.h
@@ -40,9 +40,7 @@
* Increment this value if any changes that break userspace ABI
* compatibility are made.
*/

#define MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION 3
#define MLX4_IB_UVERBS_ABI_VERSION 4
#define MLX4_IB_UVERBS_ABI_VERSION 3

/*
* Make sure that all structs defined in this file remain laid out so
@@ -52,18 +50,10 @@
* instead.
*/

struct mlx4_ib_alloc_ucontext_resp_v3 {
__u32 qp_tab_size;
__u16 bf_reg_size;
__u16 bf_regs_per_page;
};

struct mlx4_ib_alloc_ucontext_resp {
__u32 dev_caps;
__u32 qp_tab_size;
__u16 bf_reg_size;
__u16 bf_regs_per_page;
__u32 cqe_size;
};

struct mlx4_ib_alloc_pd_resp {
8 changes: 5 additions & 3 deletions trunk/drivers/infiniband/hw/nes/nes_cm.c
@@ -629,9 +629,11 @@ static void build_rdma0_msg(struct nes_cm_node *cm_node, struct nes_qp **nesqp_a

case SEND_RDMA_READ_ZERO:
default:
if (cm_node->send_rdma0_op != SEND_RDMA_READ_ZERO)
WARN(1, "Unsupported RDMA0 len operation=%u\n",
cm_node->send_rdma0_op);
if (cm_node->send_rdma0_op != SEND_RDMA_READ_ZERO) {
printk(KERN_ERR "%s[%u]: Unsupported RDMA0 len operation=%u\n",
__func__, __LINE__, cm_node->send_rdma0_op);
WARN_ON(1);
}
nes_debug(NES_DBG_CM, "Sending first rdma operation.\n");
wqe->wqe_words[NES_IWARP_SQ_WQE_MISC_IDX] =
cpu_to_le32(NES_IWARP_SQ_OP_RDMAR);