---
r: 48788
b: refs/heads/master
c: 78d8d5f
h: refs/heads/master
v: v3
Hoang-Nam Nguyen authored and Roland Dreier committed Feb 16, 2007
1 parent 2a6911c commit a49f7bd
Showing 7 changed files with 184 additions and 93 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 551fd6122d247d76124c4fdb6eb898cc8e3d74aa
refs/heads/master: 78d8d5f9ef8d6179e92b94481cfdfc45d396992f
18 changes: 13 additions & 5 deletions trunk/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -42,8 +42,6 @@
#ifndef __EHCA_CLASSES_H__
#define __EHCA_CLASSES_H__

#include "ehca_classes.h"
#include "ipz_pt_fn.h"

struct ehca_module;
struct ehca_qp;
@@ -54,14 +52,22 @@ struct ehca_mw;
struct ehca_pd;
struct ehca_av;

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#ifdef CONFIG_PPC64
#include "ehca_classes_pSeries.h"
#endif
#include "ipz_pt_fn.h"
#include "ehca_qes.h"
#include "ehca_irq.h"

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#define EHCA_EQE_CACHE_SIZE 20

#include "ehca_irq.h"
struct ehca_eqe_cache_entry {
struct ehca_eqe *eqe;
struct ehca_cq *cq;
};

struct ehca_eq {
u32 length;
@@ -74,6 +80,8 @@ struct ehca_eq {
spinlock_t spinlock;
struct tasklet_struct interrupt_task;
u32 ist;
spinlock_t irq_spinlock;
struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
};

struct ehca_sport {
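The ehca_classes.h hunk adds a fixed-size eqe_cache[] and a dedicated irq_spinlock to struct ehca_eq. As an illustration only, here is a minimal user-space sketch (stand-in types and a fake queue, not the driver code) of the drain-then-dispatch pattern such a bounded cache enables: pull at most EHCA_EQE_CACHE_SIZE entries off the event queue first, then handle them, so a single pass does bounded work.

/* sketch.c, illustrative only; every name below is a stand-in */
#include <stdio.h>

#define EQE_CACHE_SIZE 20

struct eqe { unsigned long entry; };

/* fake event queue with a few pending entries */
static struct eqe pending[] = { { 0x1 }, { 0x2 }, { 0x3 } };
static unsigned int pending_head;

static struct eqe *poll_eq(void)
{
	if (pending_head >= sizeof(pending) / sizeof(pending[0]))
		return NULL;
	return &pending[pending_head++];
}

int main(void)
{
	struct eqe *cache[EQE_CACHE_SIZE];
	int cnt = 0, i;

	/* phase 1: drain the queue into the bounded cache */
	while (cnt < EQE_CACHE_SIZE) {
		struct eqe *e = poll_eq();
		if (!e)
			break;
		cache[cnt++] = e;
	}

	/* phase 2: dispatch the cached entries */
	for (i = 0; i < cnt; i++)
		printf("handling eqe 0x%lx\n", cache[i]->entry);
	return 0;
}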
1 change: 1 addition & 0 deletions trunk/drivers/infiniband/hw/ehca/ehca_eq.c
@@ -61,6 +61,7 @@ int ehca_create_eq(struct ehca_shca *shca,
struct ib_device *ib_dev = &shca->ib_device;

spin_lock_init(&eq->spinlock);
spin_lock_init(&eq->irq_spinlock);
eq->is_initialized = 0;

if (type != EHCA_EQ && type != EHCA_NEQ) {
216 changes: 136 additions & 80 deletions trunk/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -206,7 +206,7 @@ static void qp_event_callback(struct ehca_shca *shca,
}

static void cq_event_callback(struct ehca_shca *shca,
u64 eqe)
u64 eqe)
{
struct ehca_cq *cq;
unsigned long flags;
@@ -318,15 +318,15 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
"disruptive port %x configuration change", port);

ehca_info(&shca->ib_device,
"port %x is inactive.", port);
"port %x is inactive.", port);
event.device = &shca->ib_device;
event.event = IB_EVENT_PORT_ERR;
event.element.port_num = port;
shca->sport[port - 1].port_state = IB_PORT_DOWN;
ib_dispatch_event(&event);

ehca_info(&shca->ib_device,
"port %x is active.", port);
"port %x is active.", port);
event.device = &shca->ib_device;
event.event = IB_EVENT_PORT_ACTIVE;
event.element.port_num = port;
@@ -401,87 +401,143 @@ irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
return IRQ_HANDLED;
}

void ehca_tasklet_eq(unsigned long data)
{
struct ehca_shca *shca = (struct ehca_shca*)data;
struct ehca_eqe *eqe;
int int_state;
int query_cnt = 0;

do {
eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);

if ((shca->hw_level >= 2) && eqe)
int_state = 1;
else
int_state = 0;

while ((int_state == 1) || eqe) {
while (eqe) {
u64 eqe_value = eqe->entry;

ehca_dbg(&shca->ib_device,
"eqe_value=%lx", eqe_value);

/* TODO: better structure */
if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT,
eqe_value)) {
unsigned long flags;
u32 token;
struct ehca_cq *cq;

ehca_dbg(&shca->ib_device,
"... completion event");
token =
EHCA_BMASK_GET(EQE_CQ_TOKEN,
eqe_value);
spin_lock_irqsave(&ehca_cq_idr_lock,
flags);
cq = idr_find(&ehca_cq_idr, token);

if (cq == NULL) {
spin_unlock_irqrestore(&ehca_cq_idr_lock,
flags);
break;
}

reset_eq_pending(cq);
static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
{
u64 eqe_value;
u32 token;
unsigned long flags;
struct ehca_cq *cq;
eqe_value = eqe->entry;
ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
ehca_dbg(&shca->ib_device, "... completion event");
token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
spin_lock_irqsave(&ehca_cq_idr_lock, flags);
cq = idr_find(&ehca_cq_idr, token);
if (cq == NULL) {
spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
ehca_err(&shca->ib_device,
"Invalid eqe for non-existing cq token=%x",
token);
return;
}
reset_eq_pending(cq);
#ifdef CONFIG_INFINIBAND_EHCA_SCALING
queue_comp_task(cq);
spin_unlock_irqrestore(&ehca_cq_idr_lock,
flags);
queue_comp_task(cq);
spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
#else
spin_unlock_irqrestore(&ehca_cq_idr_lock,
flags);
comp_event_callback(cq);
spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
comp_event_callback(cq);
#endif
} else {
ehca_dbg(&shca->ib_device,
"... non completion event");
parse_identifier(shca, eqe_value);
}
eqe =
(struct ehca_eqe *)ehca_poll_eq(shca,
&shca->eq);
}
} else {
ehca_dbg(&shca->ib_device,
"Got non completion event");
parse_identifier(shca, eqe_value);
}
}

if (shca->hw_level >= 2) {
int_state =
hipz_h_query_int_state(shca->ipz_hca_handle,
shca->eq.ist);
query_cnt++;
iosync();
if (query_cnt >= 100) {
query_cnt = 0;
int_state = 0;
}
}
eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
void ehca_process_eq(struct ehca_shca *shca, int is_irq)
{
struct ehca_eq *eq = &shca->eq;
struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
u64 eqe_value;
unsigned long flags;
int eqe_cnt, i;
int eq_empty = 0;

spin_lock_irqsave(&eq->irq_spinlock, flags);
if (is_irq) {
const int max_query_cnt = 100;
int query_cnt = 0;
int int_state = 1;
do {
int_state = hipz_h_query_int_state(
shca->ipz_hca_handle, eq->ist);
query_cnt++;
iosync();
} while (int_state && query_cnt < max_query_cnt);
if (unlikely((query_cnt == max_query_cnt)))
ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
int_state, query_cnt);
}

/* read out all eqes */
eqe_cnt = 0;
do {
u32 token;
eqe_cache[eqe_cnt].eqe =
(struct ehca_eqe *)ehca_poll_eq(shca, eq);
if (!eqe_cache[eqe_cnt].eqe)
break;
eqe_value = eqe_cache[eqe_cnt].eqe->entry;
if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
spin_lock(&ehca_cq_idr_lock);
eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
if (!eqe_cache[eqe_cnt].cq) {
spin_unlock(&ehca_cq_idr_lock);
ehca_err(&shca->ib_device,
"Invalid eqe for non-existing cq "
"token=%x", token);
continue;
}
spin_unlock(&ehca_cq_idr_lock);
} else
eqe_cache[eqe_cnt].cq = NULL;
eqe_cnt++;
} while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
if (!eqe_cnt) {
if (is_irq)
ehca_dbg(&shca->ib_device,
"No eqe found for irq event");
goto unlock_irq_spinlock;
} else if (!is_irq)
ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
/* enable irq for new packets */
for (i = 0; i < eqe_cnt; i++) {
if (eq->eqe_cache[i].cq)
reset_eq_pending(eq->eqe_cache[i].cq);
}
/* check eq */
spin_lock(&eq->spinlock);
eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
spin_unlock(&eq->spinlock);
/* call completion handler for cached eqes */
for (i = 0; i < eqe_cnt; i++)
if (eq->eqe_cache[i].cq) {
#ifdef CONFIG_INFINIBAND_EHCA_SCALING
spin_lock(&ehca_cq_idr_lock);
queue_comp_task(eq->eqe_cache[i].cq);
spin_unlock(&ehca_cq_idr_lock);
#else
comp_event_callback(eq->eqe_cache[i].cq);
#endif
} else {
ehca_dbg(&shca->ib_device, "Got non completion event");
parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
}
} while (int_state != 0);
/* poll eq if not empty */
if (eq_empty)
goto unlock_irq_spinlock;
do {
struct ehca_eqe *eqe;
eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
if (!eqe)
break;
process_eqe(shca, eqe);
eqe_cnt++;
} while (1);

unlock_irq_spinlock:
spin_unlock_irqrestore(&eq->irq_spinlock, flags);
}

return;
void ehca_tasklet_eq(unsigned long data)
{
ehca_process_eq((struct ehca_shca*)data, 1);
}

#ifdef CONFIG_INFINIBAND_EHCA_SCALING
@@ -654,11 +710,11 @@ static void take_over_work(struct ehca_comp_pool *pool,
list_splice_init(&cct->cq_list, &list);

while(!list_empty(&list)) {
cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);

list_del(&cq->entry);
__queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
smp_processor_id()));
list_del(&cq->entry);
__queue_comp_task(cq, per_cpu_ptr(pool->cpu_comp_tasks,
smp_processor_id()));
}

spin_unlock_irqrestore(&cct->task_lock, flags_cct);
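In the rewritten ehca_irq.c, the old tasklet body is split into process_eqe() plus a shared ehca_process_eq(shca, is_irq) serialized by the new irq_spinlock, and ehca_tasklet_eq() becomes a thin wrapper. A small user-space sketch (a pthread mutex standing in for the spinlock; names are hypothetical) of the underlying idea, namely that both the interrupt-driven tasklet and a timer-driven fallback funnel through one locked worker:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;

static void process_eq(int is_irq)
{
	pthread_mutex_lock(&irq_lock);
	printf("processing EQ (%s path)\n", is_irq ? "interrupt" : "deadman");
	/* ... drain the event queue and dispatch completions here ... */
	pthread_mutex_unlock(&irq_lock);
}

static void tasklet_eq(void) { process_eq(1); }  /* irq-driven entry point */
static void poll_eqs(void)   { process_eq(0); }  /* timer-driven fallback  */

int main(void)
{
	tasklet_eq();
	poll_eqs();
	return 0;
}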
1 change: 1 addition & 0 deletions trunk/drivers/infiniband/hw/ehca/ehca_irq.h
@@ -56,6 +56,7 @@ void ehca_tasklet_neq(unsigned long data);

irqreturn_t ehca_interrupt_eq(int irq, void *dev_id);
void ehca_tasklet_eq(unsigned long data);
void ehca_process_eq(struct ehca_shca *shca, int is_irq);

struct ehca_cpu_comp_task {
wait_queue_head_t wait_queue;
28 changes: 22 additions & 6 deletions trunk/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
MODULE_VERSION("SVNEHCA_0020");
MODULE_VERSION("SVNEHCA_0021");

int ehca_open_aqp1 = 0;
int ehca_debug_level = 0;
@@ -432,8 +432,8 @@ static int ehca_destroy_aqp1(struct ehca_sport *sport)

static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%d\n",
ehca_debug_level);
return snprintf(buf, PAGE_SIZE, "%d\n",
ehca_debug_level);
}

static ssize_t ehca_store_debug_level(struct device_driver *ddp,
@@ -778,8 +778,24 @@ void ehca_poll_eqs(unsigned long data)

spin_lock(&shca_list_lock);
list_for_each_entry(shca, &shca_list, shca_list) {
if (shca->eq.is_initialized)
ehca_tasklet_eq((unsigned long)(void*)shca);
if (shca->eq.is_initialized) {
/* call deadman proc only if eq ptr does not change */
struct ehca_eq *eq = &shca->eq;
int max = 3;
volatile u64 q_ofs, q_ofs2;
u64 flags;
spin_lock_irqsave(&eq->spinlock, flags);
q_ofs = eq->ipz_queue.current_q_offset;
spin_unlock_irqrestore(&eq->spinlock, flags);
do {
spin_lock_irqsave(&eq->spinlock, flags);
q_ofs2 = eq->ipz_queue.current_q_offset;
spin_unlock_irqrestore(&eq->spinlock, flags);
max--;
} while (q_ofs == q_ofs2 && max > 0);
if (q_ofs == q_ofs2)
ehca_process_eq(shca, 0);
}
}
mod_timer(&poll_eqs_timer, jiffies + HZ);
spin_unlock(&shca_list_lock);
@@ -790,7 +806,7 @@ int __init ehca_module_init(void)
int ret;

printk(KERN_INFO "eHCA Infiniband Device Driver "
"(Rel.: SVNEHCA_0020)\n");
"(Rel.: SVNEHCA_0021)\n");
idr_init(&ehca_qp_idr);
idr_init(&ehca_cq_idr);
spin_lock_init(&ehca_qp_idr_lock);
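The ehca_poll_eqs() change adds a "deadman" check before falling back to ehca_process_eq(shca, 0). A simplified stand-alone sketch of that check (hypothetical names, no locking): sample the queue offset a few times and only treat the EQ as stuck, and process it from the timer, if the offset never advances.

#include <stdio.h>

static unsigned long current_q_offset;	/* normally advanced by the irq path */

static void process_eq_from_timer(void)
{
	printf("EQ offset is not moving, processing from timer\n");
}

static void deadman_check(void)
{
	int max = 3;
	unsigned long q_ofs = current_q_offset, q_ofs2;

	do {
		q_ofs2 = current_q_offset;
		max--;
	} while (q_ofs == q_ofs2 && max > 0);

	if (q_ofs == q_ofs2)
		process_eq_from_timer();
}

int main(void)
{
	deadman_check();	/* the offset never moved, so the fallback fires */
	return 0;
}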