From 10854126ff6a3c924676019592213ed37a8fc554 Mon Sep 17 00:00:00 2001 From: David Disseldorp Date: Sun, 21 Dec 2008 13:56:50 -0800 Subject: [PATCH] --- yaml --- r: 121452 b: refs/heads/master c: bba7ebba3b17f4fe8c5907a32e16d9bd3fcf5192 h: refs/heads/master v: v3 --- [refs] | 2 +- .../infiniband/hw/ipath/ipath_driver.c | 49 +++---- .../infiniband/hw/ipath/ipath_file_ops.c | 30 ++-- trunk/drivers/infiniband/hw/ipath/ipath_fs.c | 2 +- .../infiniband/hw/ipath/ipath_iba6120.c | 61 -------- .../infiniband/hw/ipath/ipath_iba7220.c | 83 +---------- .../infiniband/hw/ipath/ipath_init_chip.c | 1 - .../infiniband/hw/ipath/ipath_kernel.h | 15 -- .../drivers/infiniband/hw/ipath/ipath_keys.c | 2 - trunk/drivers/infiniband/hw/ipath/ipath_mad.c | 2 - trunk/drivers/infiniband/hw/ipath/ipath_qp.c | 32 ++--- trunk/drivers/infiniband/hw/ipath/ipath_rc.c | 5 +- .../drivers/infiniband/hw/ipath/ipath_sdma.c | 21 +-- .../drivers/infiniband/hw/ipath/ipath_stats.c | 8 -- trunk/drivers/infiniband/hw/ipath/ipath_ud.c | 19 ++- .../drivers/infiniband/hw/ipath/ipath_verbs.c | 3 +- .../drivers/infiniband/hw/ipath/ipath_verbs.h | 1 - .../drivers/infiniband/ulp/iser/iscsi_iser.h | 3 + .../infiniband/ulp/iser/iser_initiator.c | 132 ++++++++++++------ .../drivers/infiniband/ulp/iser/iser_verbs.c | 1 + 20 files changed, 170 insertions(+), 302 deletions(-) diff --git a/[refs] b/[refs] index 9f09339950b6..6f393eebfc34 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 3d0890985ac4dff781b7feba19fedda547314749 +refs/heads/master: bba7ebba3b17f4fe8c5907a32e16d9bd3fcf5192 diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_driver.c b/trunk/drivers/infiniband/hw/ipath/ipath_driver.c index 69c0ce321b4e..ad0aab60b051 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_driver.c @@ -661,8 +661,6 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, static void __devexit cleanup_device(struct ipath_devdata *dd) { int port; - struct ipath_portdata **tmp; - unsigned long flags; if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) { /* can't do anything more with chip; needs re-init */ @@ -744,21 +742,20 @@ static void __devexit cleanup_device(struct ipath_devdata *dd) /* * free any resources still in use (usually just kernel ports) - * at unload; we do for portcnt, because that's what we allocate. - * We acquire lock to be really paranoid that ipath_pd isn't being - * accessed from some interrupt-related code (that should not happen, - * but best to be sure). + * at unload; we do for portcnt, not cfgports, because cfgports + * could have changed while we were loaded. 
*/ - spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); - tmp = dd->ipath_pd; - dd->ipath_pd = NULL; - spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); for (port = 0; port < dd->ipath_portcnt; port++) { - struct ipath_portdata *pd = tmp[port]; - tmp[port] = NULL; /* debugging paranoia */ + struct ipath_portdata *pd = dd->ipath_pd[port]; + dd->ipath_pd[port] = NULL; ipath_free_pddata(dd, pd); } - kfree(tmp); + kfree(dd->ipath_pd); + /* + * debuggability, in case some cleanup path tries to use it + * after this + */ + dd->ipath_pd = NULL; } static void __devexit ipath_remove_one(struct pci_dev *pdev) @@ -2589,7 +2586,6 @@ int ipath_reset_device(int unit) { int ret, i; struct ipath_devdata *dd = ipath_lookup(unit); - unsigned long flags; if (!dd) { ret = -ENODEV; @@ -2615,21 +2611,18 @@ int ipath_reset_device(int unit) goto bail; } - spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); if (dd->ipath_pd) for (i = 1; i < dd->ipath_cfgports; i++) { - if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt) - continue; - spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); - ipath_dbg("unit %u port %d is in use " - "(PID %u cmd %s), can't reset\n", - unit, i, - pid_nr(dd->ipath_pd[i]->port_pid), - dd->ipath_pd[i]->port_comm); - ret = -EBUSY; - goto bail; + if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) { + ipath_dbg("unit %u port %d is in use " + "(PID %u cmd %s), can't reset\n", + unit, i, + pid_nr(dd->ipath_pd[i]->port_pid), + dd->ipath_pd[i]->port_comm); + ret = -EBUSY; + goto bail; + } } - spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); if (dd->ipath_flags & IPATH_HAS_SEND_DMA) teardown_sdma(dd); @@ -2663,12 +2656,9 @@ static int ipath_signal_procs(struct ipath_devdata *dd, int sig) { int i, sub, any = 0; struct pid *pid; - unsigned long flags; if (!dd->ipath_pd) return 0; - - spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); for (i = 1; i < dd->ipath_cfgports; i++) { if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt) continue; @@ -2692,7 +2682,6 @@ static int ipath_signal_procs(struct ipath_devdata *dd, int sig) any++; } } - spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); return any; } diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c b/trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c index 239d4e8068ac..1af1f3a907c6 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c @@ -223,13 +223,8 @@ static int ipath_get_base_info(struct file *fp, (unsigned long long) kinfo->spi_subport_rcvhdr_base); } - /* - * All user buffers are 2KB buffers. If we ever support - * giving 4KB buffers to user processes, this will need some - * work. 
- */ - kinfo->spi_pioindex = (kinfo->spi_piobufbase - - (dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign; + kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) / + dd->ipath_palign; kinfo->spi_pioalign = dd->ipath_palign; kinfo->spi_qpair = IPATH_KD_QP; @@ -2046,9 +2041,7 @@ static int ipath_close(struct inode *in, struct file *fp) struct ipath_filedata *fd; struct ipath_portdata *pd; struct ipath_devdata *dd; - unsigned long flags; unsigned port; - struct pid *pid; ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n", (long)in->i_rdev, fp->private_data); @@ -2081,13 +2074,14 @@ static int ipath_close(struct inode *in, struct file *fp) mutex_unlock(&ipath_mutex); goto bail; } - /* early; no interrupt users after this */ - spin_lock_irqsave(&dd->ipath_uctxt_lock, flags); port = pd->port_port; - dd->ipath_pd[port] = NULL; - pid = pd->port_pid; - pd->port_pid = NULL; - spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags); + + if (pd->port_hdrqfull) { + ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors " + "during run\n", pd->port_comm, pid_nr(pd->port_pid), + pd->port_hdrqfull); + pd->port_hdrqfull = 0; + } if (pd->port_rcvwait_to || pd->port_piowait_to || pd->port_rcvnowait || pd->port_pionowait) { @@ -2144,11 +2138,13 @@ static int ipath_close(struct inode *in, struct file *fp) unlock_expected_tids(pd); ipath_stats.sps_ports--; ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n", - pd->port_comm, pid_nr(pid), + pd->port_comm, pid_nr(pd->port_pid), dd->ipath_unit, port); } - put_pid(pid); + put_pid(pd->port_pid); + pd->port_pid = NULL; + dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */ mutex_unlock(&ipath_mutex); ipath_free_pddata(dd, pd); /* after releasing the mutex */ diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_fs.c b/trunk/drivers/infiniband/hw/ipath/ipath_fs.c index 53912c327bfe..8bb5170b4e41 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_fs.c @@ -86,7 +86,7 @@ static int create_file(const char *name, mode_t mode, *dentry = NULL; mutex_lock(&parent->d_inode->i_mutex); *dentry = lookup_one_len(name, parent, strlen(name)); - if (!IS_ERR(*dentry)) + if (!IS_ERR(dentry)) error = ipathfs_mknod(parent->d_inode, *dentry, mode, fops, data); else diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_iba6120.c b/trunk/drivers/infiniband/hw/ipath/ipath_iba6120.c index fbf8c5379ea8..421cc2af891f 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_iba6120.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_iba6120.c @@ -721,12 +721,6 @@ static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) INFINIPATH_HWE_SERDESPLLFAILED); } - dd->ibdeltainprog = 1; - dd->ibsymsnap = - ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt); - dd->iblnkerrsnap = - ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt); - val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1); @@ -816,36 +810,6 @@ static void ipath_pe_quiet_serdes(struct ipath_devdata *dd) { u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); - if (dd->ibsymdelta || dd->iblnkerrdelta || - dd->ibdeltainprog) { - u64 diagc; - /* enable counter writes */ - diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl); - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, - diagc | INFINIPATH_DC_COUNTERWREN); - - if (dd->ibsymdelta || dd->ibdeltainprog) { - val = ipath_read_creg32(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt); - if (dd->ibdeltainprog) - 
val -= val - dd->ibsymsnap; - val -= dd->ibsymdelta; - ipath_write_creg(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt, val); - } - if (dd->iblnkerrdelta || dd->ibdeltainprog) { - val = ipath_read_creg32(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt); - if (dd->ibdeltainprog) - val -= val - dd->iblnkerrsnap; - val -= dd->iblnkerrdelta; - ipath_write_creg(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt, val); - } - - /* and disable counter writes */ - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc); - } val |= INFINIPATH_SERDC0_TXIDLE; ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n", (unsigned long long) val); @@ -1785,31 +1749,6 @@ static void ipath_pe_config_jint(struct ipath_devdata *dd, u16 a, u16 b) static int ipath_pe_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) { - if (ibup) { - if (dd->ibdeltainprog) { - dd->ibdeltainprog = 0; - dd->ibsymdelta += - ipath_read_creg32(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt) - - dd->ibsymsnap; - dd->iblnkerrdelta += - ipath_read_creg32(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt) - - dd->iblnkerrsnap; - } - } else { - dd->ipath_lli_counter = 0; - if (!dd->ibdeltainprog) { - dd->ibdeltainprog = 1; - dd->ibsymsnap = - ipath_read_creg32(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt); - dd->iblnkerrsnap = - ipath_read_creg32(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt); - } - } - ipath_setup_pe_setextled(dd, ipath_ib_linkstate(dd, ibcs), ipath_ib_linktrstate(dd, ibcs)); return 0; diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_iba7220.c b/trunk/drivers/infiniband/hw/ipath/ipath_iba7220.c index b2a9d4c155d1..9839e20119bc 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_iba7220.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_iba7220.c @@ -951,12 +951,6 @@ static int ipath_7220_bringup_serdes(struct ipath_devdata *dd) INFINIPATH_HWE_SERDESPLLFAILED); } - dd->ibdeltainprog = 1; - dd->ibsymsnap = - ipath_read_creg32(dd, dd->ipath_cregs->cr_ibsymbolerrcnt); - dd->iblnkerrsnap = - ipath_read_creg32(dd, dd->ipath_cregs->cr_iblinkerrrecovcnt); - if (!dd->ipath_ibcddrctrl) { /* not on re-init after reset */ dd->ipath_ibcddrctrl = @@ -1090,37 +1084,6 @@ static void ipath_7220_config_jint(struct ipath_devdata *dd, static void ipath_7220_quiet_serdes(struct ipath_devdata *dd) { u64 val; - if (dd->ibsymdelta || dd->iblnkerrdelta || - dd->ibdeltainprog) { - u64 diagc; - /* enable counter writes */ - diagc = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwdiagctrl); - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, - diagc | INFINIPATH_DC_COUNTERWREN); - - if (dd->ibsymdelta || dd->ibdeltainprog) { - val = ipath_read_creg32(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt); - if (dd->ibdeltainprog) - val -= val - dd->ibsymsnap; - val -= dd->ibsymdelta; - ipath_write_creg(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt, val); - } - if (dd->iblnkerrdelta || dd->ibdeltainprog) { - val = ipath_read_creg32(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt); - if (dd->ibdeltainprog) - val -= val - dd->iblnkerrsnap; - val -= dd->iblnkerrdelta; - ipath_write_creg(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt, val); - } - - /* and disable counter writes */ - ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, diagc); - } - dd->ipath_flags &= ~IPATH_IB_AUTONEG_INPROG; wake_up(&dd->ipath_autoneg_wait); cancel_delayed_work(&dd->ipath_autoneg_work); @@ -2362,7 +2325,7 @@ static void try_auto_neg(struct ipath_devdata *dd) static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) { - int ret = 0, symadj = 0; + int ret = 0; u32 ltstate = ipath_ib_linkstate(dd, 
ibcs); dd->ipath_link_width_active = @@ -2405,13 +2368,6 @@ static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) ipath_dbg("DDR negotiation try, %u/%u\n", dd->ipath_autoneg_tries, IPATH_AUTONEG_TRIES); - if (!dd->ibdeltainprog) { - dd->ibdeltainprog = 1; - dd->ibsymsnap = ipath_read_creg32(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt); - dd->iblnkerrsnap = ipath_read_creg32(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt); - } try_auto_neg(dd); ret = 1; /* no other IB status change processing */ } else if ((dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) @@ -2432,7 +2388,6 @@ static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) set_speed_fast(dd, dd->ipath_link_speed_enabled); wake_up(&dd->ipath_autoneg_wait); - symadj = 1; } else if (dd->ipath_flags & IPATH_IB_AUTONEG_FAILED) { /* * clear autoneg failure flag, and do setup @@ -2448,28 +2403,22 @@ static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) IBA7220_IBC_IBTA_1_2_MASK; ipath_write_kreg(dd, IPATH_KREG_OFFSET(IBNCModeCtrl), 0); - symadj = 1; } } /* - * if we are in 1X on rev1 only, and are in autoneg width, - * it could be due to an xgxs problem, so if we haven't + * if we are in 1X, and are in autoneg width, it + * could be due to an xgxs problem, so if we haven't * already tried, try twice to get to 4X; if we * tried, and couldn't, report it, since it will * probably not be what is desired. */ - if (dd->ipath_minrev == 1 && - (dd->ipath_link_width_enabled & (IB_WIDTH_1X | + if ((dd->ipath_link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) == (IB_WIDTH_1X | IB_WIDTH_4X) && dd->ipath_link_width_active == IB_WIDTH_1X && dd->ipath_x1_fix_tries < 3) { - if (++dd->ipath_x1_fix_tries == 3) { + if (++dd->ipath_x1_fix_tries == 3) dev_info(&dd->pcidev->dev, "IB link is in 1X mode\n"); - if (!(dd->ipath_flags & - IPATH_IB_AUTONEG_INPROG)) - symadj = 1; - } else { ipath_cdbg(VERBOSE, "IB 1X in " "auto-width, try %u to be " @@ -2480,8 +2429,7 @@ static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) dd->ipath_f_xgxs_reset(dd); ret = 1; /* skip other processing */ } - } else if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) - symadj = 1; + } if (!ret) { dd->delay_mult = rate_to_delay @@ -2492,25 +2440,6 @@ static int ipath_7220_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) } } - if (symadj) { - if (dd->ibdeltainprog) { - dd->ibdeltainprog = 0; - dd->ibsymdelta += ipath_read_creg32(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt) - - dd->ibsymsnap; - dd->iblnkerrdelta += ipath_read_creg32(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt) - - dd->iblnkerrsnap; - } - } else if (!ibup && !dd->ibdeltainprog - && !(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG)) { - dd->ibdeltainprog = 1; - dd->ibsymsnap = ipath_read_creg32(dd, - dd->ipath_cregs->cr_ibsymbolerrcnt); - dd->iblnkerrsnap = ipath_read_creg32(dd, - dd->ipath_cregs->cr_iblinkerrrecovcnt); - } - if (!ret) ipath_setup_7220_setextled(dd, ipath_ib_linkstate(dd, ibcs), ltstate); diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_init_chip.c b/trunk/drivers/infiniband/hw/ipath/ipath_init_chip.c index 64aeefbd2a5d..3e5baa43fc82 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_init_chip.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_init_chip.c @@ -229,7 +229,6 @@ static int init_chip_first(struct ipath_devdata *dd) spin_lock_init(&dd->ipath_kernel_tid_lock); spin_lock_init(&dd->ipath_user_tid_lock); spin_lock_init(&dd->ipath_sendctrl_lock); - spin_lock_init(&dd->ipath_uctxt_lock); 
spin_lock_init(&dd->ipath_sdma_lock); spin_lock_init(&dd->ipath_gpio_lock); spin_lock_init(&dd->ipath_eep_st_lock); diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_kernel.h b/trunk/drivers/infiniband/hw/ipath/ipath_kernel.h index 6ba4861dd6ac..0bd8bcb184a1 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/trunk/drivers/infiniband/hw/ipath/ipath_kernel.h @@ -355,19 +355,6 @@ struct ipath_devdata { /* errors masked because they occur too fast */ ipath_err_t ipath_maskederrs; u64 ipath_lastlinkrecov; /* link recoveries at last ACTIVE */ - /* these 5 fields are used to establish deltas for IB Symbol - * errors and linkrecovery errors. They can be reported on - * some chips during link negotiation prior to INIT, and with - * DDR when faking DDR negotiations with non-IBTA switches. - * The chip counters are adjusted at driver unload if there is - * a non-zero delta. - */ - u64 ibdeltainprog; - u64 ibsymdelta; - u64 ibsymsnap; - u64 iblnkerrdelta; - u64 iblnkerrsnap; - /* time in jiffies at which to re-enable maskederrs */ unsigned long ipath_unmasktime; /* count of egrfull errors, combined for all ports */ @@ -477,8 +464,6 @@ struct ipath_devdata { spinlock_t ipath_kernel_tid_lock; spinlock_t ipath_user_tid_lock; spinlock_t ipath_sendctrl_lock; - /* around ipath_pd and (user ports) port_cnt use (intr vs free) */ - spinlock_t ipath_uctxt_lock; /* * IPATH_STATUS_*, diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_keys.c b/trunk/drivers/infiniband/hw/ipath/ipath_keys.c index c0e933fec218..8f32b17a5eed 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_keys.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_keys.c @@ -132,7 +132,6 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge, * (see ipath_get_dma_mr and ipath_dma.c). */ if (sge->lkey == 0) { - /* always a kernel port, no locking needed */ struct ipath_pd *pd = to_ipd(qp->ibqp.pd); if (pd->user) { @@ -212,7 +211,6 @@ int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss, * (see ipath_get_dma_mr and ipath_dma.c). 
*/ if (rkey == 0) { - /* always a kernel port, no locking needed */ struct ipath_pd *pd = to_ipd(qp->ibqp.pd); if (pd->user) { diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_mad.c b/trunk/drivers/infiniband/hw/ipath/ipath_mad.c index 17a123197477..be4fc9ada8e7 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_mad.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_mad.c @@ -348,7 +348,6 @@ static int recv_subn_get_portinfo(struct ib_smp *smp, */ static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys) { - /* always a kernel port, no locking needed */ struct ipath_portdata *pd = dd->ipath_pd[0]; memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys)); @@ -731,7 +730,6 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys) int i; int changed = 0; - /* always a kernel port, no locking needed */ pd = dd->ipath_pd[0]; for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) { diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_qp.c b/trunk/drivers/infiniband/hw/ipath/ipath_qp.c index 3a5a89b609c4..4715911101e4 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_qp.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_qp.c @@ -745,7 +745,6 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, struct ipath_swqe *swq = NULL; struct ipath_ibdev *dev; size_t sz; - size_t sg_list_sz; struct ib_qp *ret; if (init_attr->create_flags) { @@ -790,31 +789,19 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, goto bail; } sz = sizeof(*qp); - sg_list_sz = 0; if (init_attr->srq) { struct ipath_srq *srq = to_isrq(init_attr->srq); - if (srq->rq.max_sge > 1) - sg_list_sz = sizeof(*qp->r_sg_list) * - (srq->rq.max_sge - 1); - } else if (init_attr->cap.max_recv_sge > 1) - sg_list_sz = sizeof(*qp->r_sg_list) * - (init_attr->cap.max_recv_sge - 1); - qp = kmalloc(sz + sg_list_sz, GFP_KERNEL); + sz += sizeof(*qp->r_sg_list) * + srq->rq.max_sge; + } else + sz += sizeof(*qp->r_sg_list) * + init_attr->cap.max_recv_sge; + qp = kmalloc(sz, GFP_KERNEL); if (!qp) { ret = ERR_PTR(-ENOMEM); goto bail_swq; } - if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD || - init_attr->qp_type == IB_QPT_SMI || - init_attr->qp_type == IB_QPT_GSI)) { - qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL); - if (!qp->r_ud_sg_list) { - ret = ERR_PTR(-ENOMEM); - goto bail_qp; - } - } else - qp->r_ud_sg_list = NULL; if (init_attr->srq) { sz = 0; qp->r_rq.size = 0; @@ -831,7 +818,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, qp->r_rq.size * sz); if (!qp->r_rq.wq) { ret = ERR_PTR(-ENOMEM); - goto bail_sg_list; + goto bail_qp; } } @@ -861,7 +848,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, if (err) { ret = ERR_PTR(err); vfree(qp->r_rq.wq); - goto bail_sg_list; + goto bail_qp; } qp->ip = NULL; qp->s_tx = NULL; @@ -938,8 +925,6 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd, vfree(qp->r_rq.wq); ipath_free_qp(&dev->qp_table, qp); free_qpn(&dev->qp_table, qp->ibqp.qp_num); -bail_sg_list: - kfree(qp->r_ud_sg_list); bail_qp: kfree(qp); bail_swq: @@ -1004,7 +989,6 @@ int ipath_destroy_qp(struct ib_qp *ibqp) kref_put(&qp->ip->ref, ipath_release_mmap_info); else vfree(qp->r_rq.wq); - kfree(qp->r_ud_sg_list); vfree(qp->s_wq); kfree(qp); return 0; diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_rc.c b/trunk/drivers/infiniband/hw/ipath/ipath_rc.c index 9170710b950d..7b93cda1a4bd 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_rc.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_rc.c @@ -573,8 +573,9 @@ int ipath_make_rc_req(struct ipath_qp *qp) ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len); qp->s_state = OP(RDMA_READ_REQUEST); 
hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); - bth2 = qp->s_psn & IPATH_PSN_MASK; - qp->s_psn = wqe->lpsn + 1; + bth2 = qp->s_psn++ & IPATH_PSN_MASK; + if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0) + qp->s_next_psn = qp->s_psn; ss = NULL; len = 0; qp->s_cur++; diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_sdma.c b/trunk/drivers/infiniband/hw/ipath/ipath_sdma.c index 8e255adf5d9b..284c9bca517e 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_sdma.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_sdma.c @@ -698,8 +698,10 @@ int ipath_sdma_verbs_send(struct ipath_devdata *dd, addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr, tx->map_len, DMA_TO_DEVICE); - if (dma_mapping_error(&dd->pcidev->dev, addr)) - goto ioerr; + if (dma_mapping_error(&dd->pcidev->dev, addr)) { + ret = -EIO; + goto unlock; + } dwoffset = tx->map_len >> 2; make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0); @@ -739,8 +741,6 @@ int ipath_sdma_verbs_send(struct ipath_devdata *dd, dw = (len + 3) >> 2; addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2, DMA_TO_DEVICE); - if (dma_mapping_error(&dd->pcidev->dev, addr)) - goto unmap; make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset); /* SDmaUseLargeBuf has to be set in every descriptor */ if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF) @@ -798,18 +798,7 @@ int ipath_sdma_verbs_send(struct ipath_devdata *dd, list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist); if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15) vl15_watchdog_enq(dd); - goto unlock; - -unmap: - while (tail != dd->ipath_sdma_descq_tail) { - if (!tail) - tail = dd->ipath_sdma_descq_cnt - 1; - else - tail--; - unmap_desc(dd, tail); - } -ioerr: - ret = -EIO; + unlock: spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags); fail: diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_stats.c b/trunk/drivers/infiniband/hw/ipath/ipath_stats.c index f63e143e3292..c8e3d65f0de8 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_stats.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_stats.c @@ -112,14 +112,6 @@ u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg) dd->ipath_lastrpkts = val; } val64 = dd->ipath_rpkts; - } else if (creg == dd->ipath_cregs->cr_ibsymbolerrcnt) { - if (dd->ibdeltainprog) - val64 -= val64 - dd->ibsymsnap; - val64 -= dd->ibsymdelta; - } else if (creg == dd->ipath_cregs->cr_iblinkerrrecovcnt) { - if (dd->ibdeltainprog) - val64 -= val64 - dd->iblnkerrsnap; - val64 -= dd->iblnkerrdelta; } else val64 = (u64) val; diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_ud.c b/trunk/drivers/infiniband/hw/ipath/ipath_ud.c index 91c74cc797ae..729446f56aab 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_ud.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_ud.c @@ -70,6 +70,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe) goto done; } + rsge.sg_list = NULL; + /* * Check that the qkey matches (except for QP0, see 9.6.1.4.1). * Qkeys with the high order bit set mean use the @@ -113,6 +115,21 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe) rq = &qp->r_rq; } + if (rq->max_sge > 1) { + /* + * XXX We could use GFP_KERNEL if ipath_do_send() + * was always called from the tasklet instead of + * from ipath_post_send(). + */ + rsge.sg_list = kmalloc((rq->max_sge - 1) * + sizeof(struct ipath_sge), + GFP_ATOMIC); + if (!rsge.sg_list) { + dev->n_pkt_drops++; + goto drop; + } + } + /* * Get the next work request entry to find where to put the data. 
* Note that it is safe to drop the lock after changing rq->tail @@ -130,7 +147,6 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe) goto drop; } wqe = get_rwqe_ptr(rq, tail); - rsge.sg_list = qp->r_ud_sg_list; if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) { spin_unlock_irqrestore(&rq->lock, flags); dev->n_pkt_drops++; @@ -226,6 +242,7 @@ static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe) ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, swqe->wr.send_flags & IB_SEND_SOLICITED); drop: + kfree(rsge.sg_list); if (atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait); done:; diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_verbs.c b/trunk/drivers/infiniband/hw/ipath/ipath_verbs.c index cdf0e6abd34d..eabc4247860b 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -1852,7 +1852,7 @@ unsigned ipath_get_npkeys(struct ipath_devdata *dd) } /** - * ipath_get_pkey - return the indexed PKEY from the port PKEY table + * ipath_get_pkey - return the indexed PKEY from the port 0 PKEY table * @dd: the infinipath device * @index: the PKEY index */ @@ -1860,7 +1860,6 @@ unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index) { unsigned ret; - /* always a kernel port, no locking needed */ if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys)) ret = 0; else diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_verbs.h b/trunk/drivers/infiniband/hw/ipath/ipath_verbs.h index 11e3f613df93..9d12ae8a778e 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_verbs.h +++ b/trunk/drivers/infiniband/hw/ipath/ipath_verbs.h @@ -431,7 +431,6 @@ struct ipath_qp { u32 s_lsn; /* limit sequence number (credit) */ struct ipath_swqe *s_wq; /* send work queue */ struct ipath_swqe *s_wqe; - struct ipath_sge *r_ud_sg_list; struct ipath_rq r_rq; /* receive work queue */ struct ipath_sge r_sg_list[0]; /* verified SGEs */ }; diff --git a/trunk/drivers/infiniband/ulp/iser/iscsi_iser.h b/trunk/drivers/infiniband/ulp/iser/iscsi_iser.h index 81a82628a5f1..861119593f2b 100644 --- a/trunk/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/trunk/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -252,6 +252,9 @@ struct iser_conn { wait_queue_head_t wait; /* waitq for conn/disconn */ atomic_t post_recv_buf_count; /* posted rx count */ atomic_t post_send_buf_count; /* posted tx count */ + atomic_t unexpected_pdu_count;/* count of received * + * unexpected pdus * + * not yet retired */ char name[ISER_OBJECT_NAME_SIZE]; struct iser_page_vec *page_vec; /* represents SG to fmr maps* * maps serialized as tx is*/ diff --git a/trunk/drivers/infiniband/ulp/iser/iser_initiator.c b/trunk/drivers/infiniband/ulp/iser/iser_initiator.c index cdd283189047..ed1aff21b7ea 100644 --- a/trunk/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/trunk/drivers/infiniband/ulp/iser/iser_initiator.c @@ -183,14 +183,8 @@ static int iser_post_receive_control(struct iscsi_conn *conn) struct iser_regd_buf *regd_data; struct iser_dto *recv_dto = NULL; struct iser_device *device = iser_conn->ib_conn->device; - int rx_data_size, err = 0; - - rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO); - if (rx_desc == NULL) { - iser_err("Failed to alloc desc for post recv\n"); - return -ENOMEM; - } - rx_desc->type = ISCSI_RX; + int rx_data_size, err; + int posts, outstanding_unexp_pdus; /* for the login sequence we must support rx of upto 8K; login is done * after conn create/bind (connect) and conn stop/bind (reconnect), @@ -201,46 +195,80 @@ static int 
iser_post_receive_control(struct iscsi_conn *conn) else /* FIXME till user space sets conn->max_recv_dlength correctly */ rx_data_size = 128; - rx_desc->data = kmalloc(rx_data_size, GFP_NOIO); - if (rx_desc->data == NULL) { - iser_err("Failed to alloc data buf for post recv\n"); - err = -ENOMEM; - goto post_rx_kmalloc_failure; - } + outstanding_unexp_pdus = + atomic_xchg(&iser_conn->ib_conn->unexpected_pdu_count, 0); - recv_dto = &rx_desc->dto; - recv_dto->ib_conn = iser_conn->ib_conn; - recv_dto->regd_vector_len = 0; + /* + * in addition to the response buffer, replace those consumed by + * unexpected pdus. + */ + for (posts = 0; posts < 1 + outstanding_unexp_pdus; posts++) { + rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO); + if (rx_desc == NULL) { + iser_err("Failed to alloc desc for post recv %d\n", + posts); + err = -ENOMEM; + goto post_rx_cache_alloc_failure; + } + rx_desc->type = ISCSI_RX; + rx_desc->data = kmalloc(rx_data_size, GFP_NOIO); + if (rx_desc->data == NULL) { + iser_err("Failed to alloc data buf for post recv %d\n", + posts); + err = -ENOMEM; + goto post_rx_kmalloc_failure; + } - regd_hdr = &rx_desc->hdr_regd_buf; - memset(regd_hdr, 0, sizeof(struct iser_regd_buf)); - regd_hdr->device = device; - regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */ - regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN; + recv_dto = &rx_desc->dto; + recv_dto->ib_conn = iser_conn->ib_conn; + recv_dto->regd_vector_len = 0; - iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE); + regd_hdr = &rx_desc->hdr_regd_buf; + memset(regd_hdr, 0, sizeof(struct iser_regd_buf)); + regd_hdr->device = device; + regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */ + regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN; - iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0); + iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE); - regd_data = &rx_desc->data_regd_buf; - memset(regd_data, 0, sizeof(struct iser_regd_buf)); - regd_data->device = device; - regd_data->virt_addr = rx_desc->data; - regd_data->data_size = rx_data_size; + iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0); - iser_reg_single(device, regd_data, DMA_FROM_DEVICE); + regd_data = &rx_desc->data_regd_buf; + memset(regd_data, 0, sizeof(struct iser_regd_buf)); + regd_data->device = device; + regd_data->virt_addr = rx_desc->data; + regd_data->data_size = rx_data_size; - iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0); + iser_reg_single(device, regd_data, DMA_FROM_DEVICE); - err = iser_post_recv(rx_desc); - if (!err) - return 0; + iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0); - /* iser_post_recv failed */ + err = iser_post_recv(rx_desc); + if (err) { + iser_err("Failed iser_post_recv for post %d\n", posts); + goto post_rx_post_recv_failure; + } + } + /* all posts successful */ + return 0; + +post_rx_post_recv_failure: iser_dto_buffs_release(recv_dto); kfree(rx_desc->data); post_rx_kmalloc_failure: kmem_cache_free(ig.desc_cache, rx_desc); +post_rx_cache_alloc_failure: + if (posts > 0) { + /* + * response buffer posted, but did not replace all unexpected + * pdu recv bufs. 
Ignore error, retry occurs next send + */ + outstanding_unexp_pdus -= (posts - 1); + err = 0; + } + atomic_add(outstanding_unexp_pdus, + &iser_conn->ib_conn->unexpected_pdu_count); + return err; } @@ -274,8 +302,10 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn) struct iscsi_iser_conn *iser_conn = conn->dd_data; int i; - /* no need to keep it in a var, we are after login so if this should - * be negotiated, by now the result should be available here */ + /* + * FIXME this value should be declared to the target during login with + * the MaxOutstandingUnexpectedPDUs key when supported + */ int initial_post_recv_bufs_num = ISER_MAX_RX_MISC_PDUS; iser_dbg("Initially post: %d\n", initial_post_recv_bufs_num); @@ -478,6 +508,7 @@ int iser_send_control(struct iscsi_conn *conn, int err = 0; struct iser_regd_buf *regd_buf; struct iser_device *device; + unsigned char opcode; if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) { iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn); @@ -512,10 +543,15 @@ int iser_send_control(struct iscsi_conn *conn, data_seg_len); } - if (iser_post_receive_control(conn) != 0) { - iser_err("post_rcv_buff failed!\n"); - err = -ENOMEM; - goto send_control_error; + opcode = task->hdr->opcode & ISCSI_OPCODE_MASK; + + /* post recv buffer for response if one is expected */ + if (!(opcode == ISCSI_OP_NOOP_OUT && task->hdr->itt == RESERVED_ITT)) { + if (iser_post_receive_control(conn) != 0) { + iser_err("post_rcv_buff failed!\n"); + err = -ENOMEM; + goto send_control_error; + } } err = iser_post_send(mdesc); @@ -586,6 +622,20 @@ void iser_rcv_completion(struct iser_desc *rx_desc, * parallel to the execution of iser_conn_term. So the code that waits * * for the posted rx bufs refcount to become zero handles everything */ atomic_dec(&conn->ib_conn->post_recv_buf_count); + + /* + * if an unexpected PDU was received then the recv wr consumed must + * be replaced, this is done in the next send of a control-type PDU + */ + if (opcode == ISCSI_OP_NOOP_IN && hdr->itt == RESERVED_ITT) { + /* nop-in with itt = 0xffffffff */ + atomic_inc(&conn->ib_conn->unexpected_pdu_count); + } + else if (opcode == ISCSI_OP_ASYNC_EVENT) { + /* asyncronous message */ + atomic_inc(&conn->ib_conn->unexpected_pdu_count); + } + /* a reject PDU consumes the recv buf posted for the response */ } void iser_snd_completion(struct iser_desc *tx_desc) diff --git a/trunk/drivers/infiniband/ulp/iser/iser_verbs.c b/trunk/drivers/infiniband/ulp/iser/iser_verbs.c index 26ff6214a81f..6dc6b174cdd4 100644 --- a/trunk/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/trunk/drivers/infiniband/ulp/iser/iser_verbs.c @@ -498,6 +498,7 @@ void iser_conn_init(struct iser_conn *ib_conn) init_waitqueue_head(&ib_conn->wait); atomic_set(&ib_conn->post_recv_buf_count, 0); atomic_set(&ib_conn->post_send_buf_count, 0); + atomic_set(&ib_conn->unexpected_pdu_count, 0); atomic_set(&ib_conn->refcount, 1); INIT_LIST_HEAD(&ib_conn->conn_list); spin_lock_init(&ib_conn->lock);