
Commit

---
r: 256667
b: refs/heads/master
c: fd1b6c4
h: refs/heads/master
i:
  256665: 366c034
  256663: f5973bd
v: v3
Bart Van Assche authored and Roland Dreier committed Jul 13, 2011
1 parent 9316174 commit 8bd4d9b
Showing 9 changed files with 42 additions and 112 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: e67306a38063d75f61d405527ff8bf1c8e92eb84
refs/heads/master: fd1b6c4a693c9cac59375ffb36ffe5d7c079037c
11 changes: 3 additions & 8 deletions trunk/MAINTAINERS
@@ -3425,9 +3425,10 @@ S: Maintained
F: drivers/net/ipg.*

IPATH DRIVER
M: Mike Marciniszyn <infinipath@qlogic.com>
M: Ralph Campbell <infinipath@qlogic.com>
L: linux-rdma@vger.kernel.org
S: Maintained
T: git git://git.qlogic.com/ipath-linux-2.6
S: Supported
F: drivers/infiniband/hw/ipath/

IPMI SUBSYSTEM
@@ -5151,12 +5152,6 @@ M: Robert Jarzmik <robert.jarzmik@free.fr>
L: rtc-linux@googlegroups.com
S: Maintained

QIB DRIVER
M: Mike Marciniszyn <infinipath@qlogic.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/hw/qib/

QLOGIC QLA1280 SCSI DRIVER
M: Michael Reed <mdr@sgi.com>
L: linux-scsi@vger.kernel.org
3 changes: 0 additions & 3 deletions trunk/drivers/infiniband/hw/qib/qib.h
@@ -1012,8 +1012,6 @@ struct qib_devdata {
u8 psxmitwait_supported;
/* cycle length of PS* counters in HW (in picoseconds) */
u16 psxmitwait_check_rate;
/* high volume overflow errors defered to tasklet */
struct tasklet_struct error_tasklet;
};

/* hol_state values */
@@ -1435,7 +1433,6 @@ extern struct mutex qib_mutex;
struct qib_hwerror_msgs {
u64 mask;
const char *msg;
size_t sz;
};

#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
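For context on the struct being trimmed above: the qib driver keeps tables of { mask, msg } entries, built with stringifying macros such as QLOGIC_IB_HWE_MSG()/HWE_AUTO() and terminated by a zero-mask entry, so hardware error bits can be translated into printable names. A minimal, self-contained userspace sketch of the same table-plus-sentinel idea (the bit positions and field names here are invented for illustration; they are not the driver's):

#include <stdio.h>
#include <stdint.h>

/* Analogue of struct qib_hwerror_msgs: one error bit -> one name. */
struct err_msg {
	uint64_t mask;
	const char *msg;
};

/* Stringify the field name, like the driver's HWE_AUTO()/E_AUTO() macros. */
#define ERR_AUTO(bit, fldname) { .mask = (1ULL << (bit)), .msg = #fldname }

static const struct err_msg err_msgs[] = {
	ERR_AUTO(0, HardwareErr),
	ERR_AUTO(1, InvalidAddrErr),
	ERR_AUTO(2, SDmaHaltErr),
	{ .mask = 0 }			/* zero-mask sentinel terminates the table */
};

int main(void)
{
	uint64_t errs = (1ULL << 0) | (1ULL << 2);	/* pretend error status bits */
	const struct err_msg *m;

	for (m = err_msgs; m->mask; m++)	/* walk until the sentinel */
		if (errs & m->mask)
			printf("%s ", m->msg);
	printf("\n");
	return 0;
}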
16 changes: 7 additions & 9 deletions trunk/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1527,7 +1527,6 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
struct qib_filedata *fd = fp->private_data;
const struct qib_ctxtdata *rcd = fd->rcd;
const struct qib_devdata *dd = rcd->dd;
unsigned int weight;

if (dd->flags & QIB_HAS_SEND_DMA) {
fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
@@ -1546,22 +1545,22 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
* it just means that sooner or later we don't recommend
* a cpu, and let the scheduler do it's best.
*/
weight = cpumask_weight(tsk_cpus_allowed(current));
if (!ret && weight >= qib_cpulist_count) {
if (!ret && cpus_weight(current->cpus_allowed) >=
qib_cpulist_count) {
int cpu;
cpu = find_first_zero_bit(qib_cpulist,
qib_cpulist_count);
if (cpu != qib_cpulist_count) {
__set_bit(cpu, qib_cpulist);
fd->rec_cpu_num = cpu;
}
} else if (weight == 1 &&
test_bit(cpumask_first(tsk_cpus_allowed(current)),
} else if (cpus_weight(current->cpus_allowed) == 1 &&
test_bit(first_cpu(current->cpus_allowed),
qib_cpulist))
qib_devinfo(dd->pcidev, "%s PID %u affinity "
"set to cpu %d; already allocated\n",
current->comm, current->pid,
cpumask_first(tsk_cpus_allowed(current)));
first_cpu(current->cpus_allowed));
}

mutex_unlock(&qib_mutex);
@@ -1905,9 +1904,8 @@ int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
struct qib_ctxtdata *rcd;
unsigned ctxt;
int ret = 0;
unsigned long flags;

spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
spin_lock(&ppd->dd->uctxt_lock);
for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
ctxt++) {
rcd = ppd->dd->rcd[ctxt];
@@ -1926,7 +1924,7 @@ int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
ret = 1;
break;
}
spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
spin_unlock(&ppd->dd->uctxt_lock);

return ret;
}
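The qib_assign_ctxt() hunk above is CPU-affinity bookkeeping: it computes the weight of the calling task's allowed-CPU mask (one side via cpus_weight(current->cpus_allowed), the other via cpumask_weight(tsk_cpus_allowed(current))) and, when the task is pinned to exactly one CPU, checks whether that CPU was already handed to another context. A rough userspace analogue of the mask arithmetic, using the glibc affinity API instead of the in-kernel cpumask helpers (variable names are illustrative only):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t allowed;
	int weight = 0, first = -1, cpu;

	/* Fetch this task's allowed-CPU mask; kernel code reads
	 * current->cpus_allowed / tsk_cpus_allowed(current) directly. */
	if (sched_getaffinity(0, sizeof(allowed), &allowed)) {
		perror("sched_getaffinity");
		return 1;
	}

	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		if (CPU_ISSET(cpu, &allowed)) {
			if (first < 0)
				first = cpu;	/* like first_cpu()/cpumask_first() */
			weight++;		/* like cpus_weight()/cpumask_weight() */
		}
	}

	if (weight == 1)
		printf("pinned to cpu %d\n", first);
	else
		printf("may run on %d cpus\n", weight);
	return 0;
}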
26 changes: 3 additions & 23 deletions trunk/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2434,7 +2434,6 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
int lsb, ret = 0, setforce = 0;
u16 lcmd, licmd;
unsigned long flags;
u32 tmp = 0;

switch (which) {
case QIB_IB_CFG_LIDLMC:
@@ -2468,6 +2467,9 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
maskr = IBA7220_IBC_WIDTH_MASK;
lsb = IBA7220_IBC_WIDTH_SHIFT;
setforce = 1;
spin_lock_irqsave(&ppd->lflags_lock, flags);
ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
break;

case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
@@ -2641,28 +2643,6 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
goto bail;
}
qib_set_ib_7220_lstate(ppd, lcmd, licmd);

maskr = IBA7220_IBC_WIDTH_MASK;
lsb = IBA7220_IBC_WIDTH_SHIFT;
tmp = (ppd->cpspec->ibcddrctrl >> lsb) & maskr;
/* If the width active on the chip does not match the
* width in the shadow register, write the new active
* width to the chip.
* We don't have to worry about speed as the speed is taken
* care of by set_7220_ibspeed_fast called by ib_updown.
*/
if (ppd->link_width_enabled-1 != tmp) {
ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
ppd->cpspec->ibcddrctrl |=
(((u64)(ppd->link_width_enabled-1) & maskr) <<
lsb);
qib_write_kreg(dd, kr_ibcddrctrl,
ppd->cpspec->ibcddrctrl);
qib_write_kreg(dd, kr_scratch, 0);
spin_lock_irqsave(&ppd->lflags_lock, flags);
ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
}
goto bail;

case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
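Most of the qib_7220_set_ib_cfg() churn above follows one idiom: a multi-bit field inside a shadowed 64-bit IBC control register (maskr/lsb pairs such as IBA7220_IBC_WIDTH_MASK and IBA7220_IBC_WIDTH_SHIFT) is updated by clearing the field in the shadow copy, OR-ing in the new value, and writing the shadow back to the chip. A generic, self-contained sketch of that read-modify-write pattern (the register value, mask, and shift below are made up for illustration):

#include <stdio.h>
#include <stdint.h>

#define WIDTH_MASK  0x7ULL	/* hypothetical field mask, in the spirit of IBA7220_IBC_WIDTH_MASK */
#define WIDTH_SHIFT 30		/* hypothetical field position, like ..._WIDTH_SHIFT */

/* Replace one field in a shadowed control-register value. */
static uint64_t set_field(uint64_t shadow, uint64_t mask, unsigned lsb, uint64_t val)
{
	shadow &= ~(mask << lsb);		/* clear the old field */
	shadow |= (val & mask) << lsb;		/* insert the new value */
	return shadow;				/* caller writes this back to the chip */
}

int main(void)
{
	uint64_t ibcddrctrl = 0x123456789abcdef0ULL;	/* pretend shadow copy */

	ibcddrctrl = set_field(ibcddrctrl, WIDTH_MASK, WIDTH_SHIFT, 0x2);
	printf("field now %llu\n",
	       (unsigned long long)((ibcddrctrl >> WIDTH_SHIFT) & WIDTH_MASK));
	return 0;
}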
72 changes: 22 additions & 50 deletions trunk/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -114,10 +114,6 @@ static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");

static ushort qib_krcvq01_no_msi;
module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");

/*
* Receive header queue sizes
*/
@@ -401,6 +397,7 @@ MODULE_PARM_DESC(txselect, \
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
@@ -1110,9 +1107,9 @@ static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */

#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
.msg = #fldname , .sz = sizeof(#fldname) }
.msg = #fldname }
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
fldname##Mask##_##port), .msg = #fldname }
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
@@ -1130,16 +1127,14 @@ static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
HWE_AUTO(statusValidNoEop),
HWE_AUTO(LATriggered),
{ .mask = 0, .sz = 0 }
{ .mask = 0 }
};

#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
.msg = #fldname, .sz = sizeof(#fldname) }
.msg = #fldname }
#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
.msg = #fldname, .sz = sizeof(#fldname) }
.msg = #fldname }
static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
E_AUTO(RcvEgrFullErr),
E_AUTO(RcvHdrFullErr),
E_AUTO(ResetNegated),
E_AUTO(HardwareErr),
E_AUTO(InvalidAddrErr),
@@ -1152,7 +1147,9 @@ static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
E_AUTO(SendSpecialTriggerErr),
E_AUTO(SDmaWrongPortErr),
E_AUTO(SDmaBufMaskDuplicateErr),
{ .mask = 0, .sz = 0 }
E_AUTO(RcvHdrFullErr),
E_AUTO(RcvEgrFullErr),
{ .mask = 0 }
};

static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
@@ -1162,8 +1159,7 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
/*
* SDmaHaltErr is not really an error, make it clearer;
*/
{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
.sz = 11},
{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"},
E_P_AUTO(SDmaDescAddrMisalignErr),
E_P_AUTO(SDmaUnexpDataErr),
E_P_AUTO(SDmaMissingDwErr),
@@ -1199,33 +1195,33 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
E_P_AUTO(RcvICRCErr),
E_P_AUTO(RcvVCRCErr),
E_P_AUTO(RcvFormatErr),
{ .mask = 0, .sz = 0 }
{ .mask = 0 }
};

/*
* Below generates "auto-message" for interrupts not specific to any port or
* context
*/
#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
.msg = #fldname, .sz = sizeof(#fldname) }
.msg = #fldname }
/* Below generates "auto-message" for interrupts specific to a port */
#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
SYM_LSB(IntMask, fldname##Mask##_0), \
SYM_LSB(IntMask, fldname##Mask##_1)), \
.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
.msg = #fldname "_P" }
/* For some reason, the SerDesTrimDone bits are reversed */
#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
SYM_LSB(IntMask, fldname##Mask##_1), \
SYM_LSB(IntMask, fldname##Mask##_0)), \
.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
.msg = #fldname "_P" }
/*
* Below generates "auto-message" for interrupts specific to a context,
* with ctxt-number appended
*/
#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
SYM_LSB(IntMask, fldname##0IntMask), \
SYM_LSB(IntMask, fldname##17IntMask)), \
.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
.msg = #fldname "_C"}

static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
INTR_AUTO_P(SDmaInt),
Expand All @@ -1239,12 +1235,11 @@ static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
INTR_AUTO_P(SendDoneInt),
INTR_AUTO(SendBufAvailInt),
INTR_AUTO_C(RcvAvail),
{ .mask = 0, .sz = 0 }
{ .mask = 0 }
};

#define TXSYMPTOM_AUTO_P(fldname) \
{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
.msg = #fldname, .sz = sizeof(#fldname) }
{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname }
static const struct qib_hwerror_msgs hdrchk_msgs[] = {
TXSYMPTOM_AUTO_P(NonKeyPacket),
TXSYMPTOM_AUTO_P(GRHFail),
Expand All @@ -1253,7 +1248,7 @@ static const struct qib_hwerror_msgs hdrchk_msgs[] = {
TXSYMPTOM_AUTO_P(SLIDFail),
TXSYMPTOM_AUTO_P(RawIPV6),
TXSYMPTOM_AUTO_P(PacketTooSmall),
{ .mask = 0, .sz = 0 }
{ .mask = 0 }
};

#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
@@ -1298,7 +1293,7 @@ static void err_decode(char *msg, size_t len, u64 errs,
u64 these, lmask;
int took, multi, n = 0;

while (errs && msp && msp->mask) {
while (msp && msp->mask) {
multi = (msp->mask & (msp->mask - 1));
while (errs & msp->mask) {
these = (errs & msp->mask);
@@ -1309,14 +1304,9 @@ static void err_decode(char *msg, size_t len, u64 errs,
*msg++ = ',';
len--;
}
BUG_ON(!msp->sz);
/* msp->sz counts the nul */
took = min_t(size_t, msp->sz - (size_t)1, len);
memcpy(msg, msp->msg, took);
took = scnprintf(msg, len, "%s", msp->msg);
len -= took;
msg += took;
if (len)
*msg = '\0';
}
errs &= ~lmask;
if (len && multi) {
@@ -1654,14 +1644,6 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
return;
}

static void qib_error_tasklet(unsigned long data)
{
struct qib_devdata *dd = (struct qib_devdata *)data;

handle_7322_errors(dd);
qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
}

static void reenable_chase(unsigned long opaque)
{
struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
@@ -2743,10 +2725,8 @@ static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
unknown_7322_ibits(dd, istat);
if (istat & QIB_I_GPIO)
unknown_7322_gpio_intr(dd);
if (istat & QIB_I_C_ERROR) {
qib_write_kreg(dd, kr_errmask, 0ULL);
tasklet_schedule(&dd->error_tasklet);
}
if (istat & QIB_I_C_ERROR)
handle_7322_errors(dd);
if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
handle_7322_p_errors(dd->rcd[0]->ppd);
if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
@@ -3145,8 +3125,6 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
arg = dd->rcd[ctxt];
if (!arg)
continue;
if (qib_krcvq01_no_msi && ctxt < 2)
continue;
lsb = QIB_I_RCVAVAIL_LSB + ctxt;
handler = qib_7322pintr;
name = QIB_DRV_NAME " (kctx)";
@@ -3181,8 +3159,6 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
for (i = 0; i < ARRAY_SIZE(redirect); i++)
qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
dd->cspec->main_int_mask = mask;
tasklet_init(&dd->error_tasklet, qib_error_tasklet,
(unsigned long)dd);
bail:;
}

@@ -6812,10 +6788,6 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
(i >= ARRAY_SIZE(irq_table) &&
dd->rcd[i - ARRAY_SIZE(irq_table)]))
actual_cnt++;
/* reduce by ctxt's < 2 */
if (qib_krcvq01_no_msi)
actual_cnt -= dd->num_pports;

tabsize = actual_cnt;
dd->cspec->msix_entries = kmalloc(tabsize *
sizeof(struct msix_entry), GFP_KERNEL);
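The err_decode() hunk earlier in this file contrasts two ways of appending error names to a bounded message buffer: copying msp->msg with memcpy and a precomputed .sz (which counts the terminating NUL), versus formatting with scnprintf and advancing by its return value. A small userspace sketch of the append-with-cursor idea, using snprintf clamped to behave like the kernel's scnprintf (buffer size and error names are arbitrary):

#include <stdio.h>

/* Append src to a bounded buffer and return how many bytes were really
 * written. The kernel's scnprintf() gives this directly; userspace
 * snprintf() reports what *would* have fit, so clamp it. */
static size_t append(char *dst, size_t len, const char *src)
{
	int want = snprintf(dst, len, "%s", src);

	if (want < 0)
		return 0;
	if ((size_t)want >= len)
		return len ? len - 1 : 0;
	return (size_t)want;
}

int main(void)
{
	char buf[32];
	char *msg = buf;
	size_t len = sizeof(buf);
	const char *names[] = { "RcvHdrFullErr", "SDmaHalted", "RcvEgrFullErr" };
	size_t i, took;

	buf[0] = '\0';
	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		if (i && len > 1) {		/* comma-separate the entries */
			*msg++ = ',';
			len--;
			*msg = '\0';
		}
		took = append(msg, len, names[i]);	/* advance by bytes written */
		msg += took;
		len -= took;
	}
	printf("%s\n", buf);	/* truncates safely if the buffer fills up */
	return 0;
}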
