IB/ehca: Fix problem with generated flush work completions
This fix enables the ehca device driver to generate flush work completions
even if the application does not request completions for all work
requests. Without this fix, ehca generates flush work completions for the
wrong work requests if an application uses unsignaled work completions.

Signed-off-by: Stefan Roscher <stefan.roscher@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Stefan Roscher authored and Roland Dreier committed Dec 1, 2008
1 parent 6b1f9d6 commit 7ec4f46
Showing 3 changed files with 53 additions and 28 deletions.
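
For background: on a send queue created with sq_sig_type == IB_SIGNAL_REQ_WR,
only work requests posted with IB_SEND_SIGNALED produce a completion queue
entry (CQE). A minimal consumer-side sketch of such an unsignaled post
(illustrative only, not part of this commit; qp, dma_addr, len, lkey and
app_cookie are assumed to exist, see <rdma/ib_verbs.h>):

        struct ib_sge sge = {
                .addr   = dma_addr,
                .length = len,
                .lkey   = lkey,
        };
        struct ib_send_wr wr = {
                .wr_id      = app_cookie,   /* handed back in wc->wr_id */
                .sg_list    = &sge,
                .num_sge    = 1,
                .opcode     = IB_WR_SEND,
                .send_flags = 0,            /* unsignaled: no CQE on success */
        };
        struct ib_send_wr *bad_wr;
        int ret = ib_post_send(qp, &wr, &bad_wr);

Because unsignaled work requests never deliver a CQE, the driver cannot assume
a one-to-one mapping between polled CQEs and posted WQEs when it generates
flush completions; the per-WQE cqe_req flag introduced below records which
WQEs actually requested one.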
drivers/infiniband/hw/ehca/ehca_classes.h (3 additions, 1 deletion)

@@ -163,14 +163,16 @@ struct ehca_mod_qp_parm {
 /* struct for tracking if cqes have been reported to the application */
 struct ehca_qmap_entry {
         u16 app_wr_id;
-        u16 reported;
+        u8 reported;
+        u8 cqe_req;
 };
 
 struct ehca_queue_map {
         struct ehca_qmap_entry *map;
         unsigned int entries;
         unsigned int tail;
         unsigned int left_to_poll;
+        unsigned int next_wqe_idx;   /* Idx to first wqe to be flushed */
 };
 
 struct ehca_qp {
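
Shrinking reported from u16 to u8 makes room for the new per-WQE cqe_req flag
without growing the entry. A standalone layout check (userspace sketch, with
stdint types standing in for the kernel's u16/u8):

        #include <stdio.h>
        #include <stdint.h>

        struct ehca_qmap_entry {
                uint16_t app_wr_id;
                uint8_t  reported;  /* CQE already handed to the application? */
                uint8_t  cqe_req;   /* did this WQE request a CQE? */
        };

        int main(void)
        {
                /* one 2-byte and two 1-byte members: 4 bytes, no padding */
                printf("%zu\n", sizeof(struct ehca_qmap_entry));
                return 0;
        }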
drivers/infiniband/hw/ehca/ehca_qp.c (20 additions, 6 deletions)

@@ -435,9 +435,13 @@ static void reset_queue_map(struct ehca_queue_map *qmap)
 {
         int i;
 
-        qmap->tail = 0;
-        for (i = 0; i < qmap->entries; i++)
+        qmap->tail = qmap->entries - 1;
+        qmap->left_to_poll = 0;
+        qmap->next_wqe_idx = 0;
+        for (i = 0; i < qmap->entries; i++) {
                 qmap->map[i].reported = 1;
+                qmap->map[i].cqe_req = 0;
+        }
 }
 
 /*
@@ -1121,6 +1125,7 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
         void *wqe_v;
         u64 q_ofs;
         u32 wqe_idx;
+        unsigned int tail_idx;
 
         /* convert real to abs address */
         wqe_p = wqe_p & (~(1UL << 63));
@@ -1133,12 +1138,17 @@
                 return -EFAULT;
         }
 
+        tail_idx = (qmap->tail + 1) % qmap->entries;
         wqe_idx = q_ofs / ipz_queue->qe_size;
-        if (wqe_idx < qmap->tail)
-                qmap->left_to_poll = (qmap->entries - qmap->tail) + wqe_idx;
-        else
-                qmap->left_to_poll = wqe_idx - qmap->tail;
 
+        /* check all processed wqes, whether a cqe is requested or not */
+        while (tail_idx != wqe_idx) {
+                if (qmap->map[tail_idx].cqe_req)
+                        qmap->left_to_poll++;
+                tail_idx = (tail_idx + 1) % qmap->entries;
+        }
+        /* save index in queue, where we have to start flushing */
+        qmap->next_wqe_idx = wqe_idx;
         return 0;
 }

@@ -1185,10 +1195,14 @@ static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
         } else {
                 spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
                 my_qp->sq_map.left_to_poll = 0;
+                my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
                                                         my_qp->sq_map.entries;
                 spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
 
                 spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
                 my_qp->rq_map.left_to_poll = 0;
+                my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
                                                         my_qp->rq_map.entries;
                 spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
         }
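
To see what the new loop in calc_left_cqes() computes, here is a standalone
replay of it with invented values (entries, tail, wqe_idx and the cqe_req
pattern are made up for the example):

        #include <stdio.h>

        int main(void)
        {
                unsigned int entries = 8, tail = 2, wqe_idx = 6;
                unsigned int left_to_poll = 0;
                /* per-slot cqe_req: only slots 3 and 5 were posted signaled */
                unsigned char cqe_req[8] = { 0, 0, 0, 1, 0, 1, 0, 0 };
                unsigned int tail_idx = (tail + 1) % entries;

                /* walk the not-yet-reported slots, as the driver loop does */
                while (tail_idx != wqe_idx) {
                        if (cqe_req[tail_idx])
                                left_to_poll++;
                        tail_idx = (tail_idx + 1) % entries;
                }
                printf("left_to_poll = %u\n", left_to_poll);    /* prints 2 */
                return 0;
        }

The removed code computed wqe_idx - tail (modulo entries), i.e. it assumed
every outstanding WQE would still deliver a CQE; with unsignaled WQEs that
over-counts the CQEs that can actually arrive, which is how the old driver
ended up flushing the wrong work requests.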
drivers/infiniband/hw/ehca/ehca_reqs.c (30 additions, 21 deletions)

@@ -179,6 +179,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,

         qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
         qmap_entry->reported = 0;
+        qmap_entry->cqe_req = 0;
 
         switch (send_wr->opcode) {
         case IB_WR_SEND:
@@ -203,8 +204,10 @@

         if ((send_wr->send_flags & IB_SEND_SIGNALED ||
             qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
-            && !hidden)
+            && !hidden) {
                 wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
+                qmap_entry->cqe_req = 1;
+        }
 
         if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
             send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
@@ -569,6 +572,7 @@ static int internal_post_recv(struct ehca_qp *my_qp,
                 qmap_entry = &my_qp->rq_map.map[rq_map_idx];
                 qmap_entry->app_wr_id = get_app_wr_id(cur_recv_wr->wr_id);
                 qmap_entry->reported = 0;
+                qmap_entry->cqe_req = 1;
 
                 wqe_cnt++;
         } /* eof for cur_recv_wr */
@@ -706,27 +710,34 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
                 goto repoll;
         wc->qp = &my_qp->ib_qp;
 
+        qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
+        if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
+                /* We got a send completion. */
+                qmap = &my_qp->sq_map;
+        else
+                /* We got a receive completion. */
+                qmap = &my_qp->rq_map;
+
+        /* advance the tail pointer */
+        qmap->tail = qmap_tail_idx;
+
         if (is_error) {
                 /*
                  * set left_to_poll to 0 because in error state, we will not
                  * get any additional CQEs
                  */
-                ehca_add_to_err_list(my_qp, 1);
+                my_qp->sq_map.next_wqe_idx = (my_qp->sq_map.tail + 1) %
                                                         my_qp->sq_map.entries;
                 my_qp->sq_map.left_to_poll = 0;
+                ehca_add_to_err_list(my_qp, 1);
 
+                my_qp->rq_map.next_wqe_idx = (my_qp->rq_map.tail + 1) %
                                                         my_qp->rq_map.entries;
+                my_qp->rq_map.left_to_poll = 0;
                 if (HAS_RQ(my_qp))
                         ehca_add_to_err_list(my_qp, 0);
-                my_qp->rq_map.left_to_poll = 0;
         }
 
-        qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
-        if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
-                /* We got a send completion. */
-                qmap = &my_qp->sq_map;
-        else
-                /* We got a receive completion. */
-                qmap = &my_qp->rq_map;
-
         qmap_entry = &qmap->map[qmap_tail_idx];
         if (qmap_entry->reported) {
                 ehca_warn(cq->device, "Double cqe on qp_num=%#x",
@@ -738,10 +749,6 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
         wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
         qmap_entry->reported = 1;
 
-        /* this is a proper completion, we need to advance the tail pointer */
-        if (++qmap->tail == qmap->entries)
-                qmap->tail = 0;
-
         /* if left_to_poll is decremented to 0, add the QP to the error list */
         if (qmap->left_to_poll > 0) {
                 qmap->left_to_poll--;
@@ -805,13 +812,14 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
         else
                 qmap = &my_qp->rq_map;
 
-        qmap_entry = &qmap->map[qmap->tail];
+        qmap_entry = &qmap->map[qmap->next_wqe_idx];
 
         while ((nr < num_entries) && (qmap_entry->reported == 0)) {
                 /* generate flush CQE */
+
                 memset(wc, 0, sizeof(*wc));
 
-                offset = qmap->tail * ipz_queue->qe_size;
+                offset = qmap->next_wqe_idx * ipz_queue->qe_size;
                 wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
                 if (!wqe) {
                         ehca_err(cq->device, "Invalid wqe offset=%#lx on "
@@ -850,11 +858,12 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,

                 wc->qp = &my_qp->ib_qp;
 
-                /* mark as reported and advance tail pointer */
+                /* mark as reported and advance next_wqe pointer */
                 qmap_entry->reported = 1;
-                if (++qmap->tail == qmap->entries)
-                        qmap->tail = 0;
-                qmap_entry = &qmap->map[qmap->tail];
+                qmap->next_wqe_idx++;
+                if (qmap->next_wqe_idx == qmap->entries)
+                        qmap->next_wqe_idx = 0;
+                qmap_entry = &qmap->map[qmap->next_wqe_idx];
 
                 wc++; nr++;
         }
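
The behavioural change in the poll path: the tail no longer advances by one
per polled CQE but jumps to the slot index recovered from the CQE's
work_request_id, and flush generation starts at next_wqe_idx rather than at
tail. A toy replay of why the blind increment was wrong (four WQEs, only
slots 0 and 3 posted signaled; all values invented):

        #include <stdio.h>

        int main(void)
        {
                /* slots whose WQEs were posted signaled and thus produce CQEs */
                unsigned int cqe_slot[] = { 0, 3 };
                unsigned int entries = 4, old_tail = 0, new_tail = 0;
                int i;

                for (i = 0; i < 2; i++) {
                        old_tail = (old_tail + 1) % entries; /* old: one step per CQE */
                        new_tail = cqe_slot[i];              /* new: index from the CQE */
                }
                /* old_tail == 2: flushing would restart at a WQE that already
                 * completed successfully (the wrong work request);
                 * new_tail == 3: flushing starts at (new_tail + 1) % entries. */
                printf("old_tail=%u new_tail=%u\n", old_tail, new_tail);
                return 0;
        }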
