From 3463b1ceddca772b20e71434a246cfef85c4a11f Mon Sep 17 00:00:00 2001
From: Stefan Rompf
Date: Tue, 9 May 2006 15:15:35 -0700
Subject: [PATCH]

--- yaml ---
r: 26632
b: refs/heads/master
c: 3a01c1ef75e1d84752ddef607c389bbde9c2576e
h: refs/heads/master
v: v3
---
 [refs] | 2 +-
 trunk/Documentation/networking/operstates.txt | 161 +++++++++++++++
 trunk/Documentation/scsi/ChangeLog.megaraid | 25 ---
 trunk/block/elevator.c | 8 +-
 trunk/block/ll_rw_blk.c | 17 +-
 trunk/drivers/char/pcmcia/cm4000_cs.c | 10 +-
 trunk/drivers/char/pcmcia/cm4040_cs.c | 11 +-
 trunk/drivers/infiniband/core/sysfs.c | 2 +-
 trunk/drivers/infiniband/hw/mthca/mthca_cq.c | 41 ++--
 trunk/drivers/infiniband/hw/mthca/mthca_dev.h | 2 +-
 trunk/drivers/infiniband/hw/mthca/mthca_mr.c | 15 +-
 .../infiniband/hw/mthca/mthca_provider.h | 22 +-
 trunk/drivers/infiniband/hw/mthca/mthca_qp.c | 31 +--
 trunk/drivers/infiniband/hw/mthca/mthca_srq.c | 23 +--
 .../drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 4 +-
 trunk/drivers/infiniband/ulp/srp/ib_srp.c | 195 ++++++++----------
 trunk/drivers/infiniband/ulp/srp/ib_srp.h | 4 +-
 trunk/drivers/message/fusion/mptbase.c | 63 +++---
 trunk/drivers/message/fusion/mptbase.h | 10 +-
 trunk/drivers/message/fusion/mptfc.c | 134 +++++-------
 trunk/drivers/message/fusion/mptsas.c | 99 +++------
 trunk/drivers/message/fusion/mptscsih.c | 50 +++--
 trunk/drivers/message/fusion/mptspi.c | 68 +-----
 trunk/drivers/net/dl2k.c | 12 +-
 trunk/drivers/net/phy/mdio_bus.c | 4 +-
 trunk/drivers/net/sis900.c | 1 -
 trunk/drivers/net/sky2.c | 5 +-
 trunk/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c | 1 -
 trunk/drivers/scsi/aic7xxx/aic7xxx_pci.c | 12 +-
 trunk/drivers/scsi/ibmvscsi/ibmvscsi.c | 30 ++-
 trunk/drivers/scsi/lpfc/lpfc_crtn.h | 1 +
 trunk/drivers/scsi/lpfc/lpfc_disc.h | 1 -
 trunk/drivers/scsi/lpfc/lpfc_els.c | 95 ++++-----
 trunk/drivers/scsi/lpfc/lpfc_hbadisc.c | 18 +-
 trunk/drivers/scsi/lpfc/lpfc_hw.h | 3 -
 trunk/drivers/scsi/lpfc/lpfc_init.c | 22 +-
 trunk/drivers/scsi/lpfc/lpfc_mbox.c | 33 ++-
 trunk/drivers/scsi/lpfc/lpfc_nportdisc.c | 134 ++++--------
 trunk/drivers/scsi/lpfc/lpfc_scsi.c | 68 ++++--
 trunk/drivers/scsi/lpfc/lpfc_version.h | 2 +-
 trunk/drivers/scsi/megaraid.c | 1 +
 trunk/drivers/scsi/megaraid/megaraid_mbox.c | 59 ++----
 trunk/drivers/scsi/megaraid/megaraid_mbox.h | 7 +-
 trunk/drivers/scsi/megaraid/megaraid_mm.c | 6 +-
 trunk/drivers/scsi/qla2xxx/qla_os.c | 19 +-
 trunk/drivers/scsi/scsi_devinfo.c | 2 -
 trunk/drivers/scsi/scsi_lib.c | 27 +--
 trunk/drivers/scsi/sim710.c | 2 +-
 trunk/include/linux/dma-mapping.h | 1 -
 trunk/include/scsi/srp.h | 23 +--
 trunk/kernel/ptrace.c | 20 +-
 51 files changed, 703 insertions(+), 903 deletions(-)
 create mode 100644 trunk/Documentation/networking/operstates.txt

diff --git a/[refs] b/[refs]
index f7320cadf9dd..830f2c6c9d1c 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f7a014af2d76a96e5af51b64f954328b700fa62f
+refs/heads/master: 3a01c1ef75e1d84752ddef607c389bbde9c2576e
diff --git a/trunk/Documentation/networking/operstates.txt b/trunk/Documentation/networking/operstates.txt
new file mode 100644
index 000000000000..4a21d9bb836b
--- /dev/null
+++ b/trunk/Documentation/networking/operstates.txt
@@ -0,0 +1,161 @@
+
+1. Introduction
+
+Linux distinguishes between administrative and operational state of an
+interface. Administrative state is the result of "ip link set dev
+<dev> up or down" and reflects whether the administrator wants to use
+the device for traffic.
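To make the administrative/running distinction concrete, here is a minimal
user-space sketch (not part of this patch) that reads the IFF_UP and
IFF_RUNNING flags through the SIOCGIFFLAGS ioctl described in section 2
below; the interface name "eth0" is only an example:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example name */
	if (ioctl(fd, SIOCGIFFLAGS, &ifr) == 0)
		printf("admin %s, running %s\n",
		       (ifr.ifr_flags & IFF_UP) ? "up" : "down",
		       (ifr.ifr_flags & IFF_RUNNING) ? "yes" : "no");
	close(fd);
	return 0;
}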
+
+However, an interface is not usable just because the admin enabled it
+- ethernet requires to be plugged into the switch and, depending on
+a site's networking policy and configuration, an 802.1X authentication
+to be performed before user data can be transferred. Operational state
+shows the ability of an interface to transmit this user data.
+
+Thanks to 802.1X, userspace must be granted the possibility to
+influence operational state. To accommodate this, operational state is
+split into two parts: Two flags that can be set by the driver only, and
+a RFC2863 compatible state that is derived from these flags, a policy,
+and changeable from userspace under certain rules.
+
+
+2. Querying from userspace
+
+Both admin and operational state can be queried via the netlink
+operation RTM_GETLINK. It is also possible to subscribe to RTMGRP_LINK
+to be notified of updates. This is important for setting from userspace.
+
+These values contain interface state:
+
+ifinfomsg::if_flags & IFF_UP:
+ Interface is admin up
+ifinfomsg::if_flags & IFF_RUNNING:
+ Interface is in RFC2863 operational state UP or UNKNOWN. This is for
+ backward compatibility, routing daemons, dhcp clients can use this
+ flag to determine whether they should use the interface.
+ifinfomsg::if_flags & IFF_LOWER_UP:
+ Driver has signaled netif_carrier_on()
+ifinfomsg::if_flags & IFF_DORMANT:
+ Driver has signaled netif_dormant_on()
+
+These interface flags can also be queried without netlink using the
+SIOCGIFFLAGS ioctl.
+
+TLV IFLA_OPERSTATE
+
+contains RFC2863 state of the interface in numeric representation:
+
+IF_OPER_UNKNOWN (0):
+ Interface is in unknown state, neither driver nor userspace has set
+ operational state. Interface must be considered for user data as
+ setting operational state has not been implemented in every driver.
+IF_OPER_NOTPRESENT (1):
+ Unused in current kernel (notpresent interfaces normally disappear),
+ just a numerical placeholder.
+IF_OPER_DOWN (2):
+ Interface is unable to transfer data on L1, f.e. ethernet is not
+ plugged or interface is ADMIN down.
+IF_OPER_LOWERLAYERDOWN (3):
+ Interfaces stacked on an interface that is IF_OPER_DOWN show this
+ state (f.e. VLAN).
+IF_OPER_TESTING (4):
+ Unused in current kernel.
+IF_OPER_DORMANT (5):
+ Interface is L1 up, but waiting for an external event, f.e. for a
+ protocol to establish. (802.1X)
+IF_OPER_UP (6):
+ Interface is operational up and can be used.
+
+This TLV can also be queried via sysfs.
+
+TLV IFLA_LINKMODE
+
+contains link policy. This is needed for userspace interaction
+described below.
+
+This TLV can also be queried via sysfs.
+
+
+3. Kernel driver API
+
+Kernel drivers have access to two flags that map to IFF_LOWER_UP and
+IFF_DORMANT. These flags can be set from everywhere, even from
+interrupts. It is guaranteed that only the driver has write access,
+however, if different layers of the driver manipulate the same flag,
+the driver has to provide the synchronisation needed.
+
+__LINK_STATE_NOCARRIER, maps to !IFF_LOWER_UP:
+
+The driver uses netif_carrier_on() to clear and netif_carrier_off() to
+set this flag. On netif_carrier_off(), the scheduler stops sending
+packets. The name 'carrier' and the inversion are historical, think of
+it as lower layer.
+
+netif_carrier_ok() can be used to query that bit.
+
+__LINK_STATE_DORMANT, maps to IFF_DORMANT:
+
+Set by the driver to express that the device cannot yet be used
+because some driver controlled protocol establishment has to
+complete.
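A hedged sketch of how a driver might drive these two flags (not part of
this patch; the example_* names are invented, only the netif_* helpers are
the kernel API the surrounding text describes):

#include <linux/netdevice.h>

static void example_link_change(struct net_device *dev, int link_up)
{
	if (!link_up) {
		/* L1 lost: sets __LINK_STATE_NOCARRIER, packets stop */
		netif_carrier_off(dev);
		return;
	}
	/* L1 is back ... */
	netif_carrier_on(dev);
	/* ... but hold user data until 802.1X (or similar) finishes */
	netif_dormant_on(dev);
}

static void example_auth_done(struct net_device *dev)
{
	/* protocol establishment finished, interface may go operational */
	netif_dormant_off(dev);
}

Whenever the flag combination changes, the kernel translates it to
IFLA_OPERSTATE as described below.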
+Corresponding functions are netif_dormant_on() to set the
+flag, netif_dormant_off() to clear it and netif_dormant() to query.
+
+On device allocation, networking core sets the flags equivalent to
+netif_carrier_ok() and !netif_dormant().
+
+
+Whenever the driver CHANGES one of these flags, a workqueue event is
+scheduled to translate the flag combination to IFLA_OPERSTATE as
+follows:
+
+!netif_carrier_ok():
+ IF_OPER_LOWERLAYERDOWN if the interface is stacked, IF_OPER_DOWN
+ otherwise. Kernel can recognise stacked interfaces because their
+ ifindex != iflink.
+
+netif_carrier_ok() && netif_dormant():
+ IF_OPER_DORMANT
+
+netif_carrier_ok() && !netif_dormant():
+ IF_OPER_UP if userspace interaction is disabled. Otherwise
+ IF_OPER_DORMANT with the possibility for userspace to initiate the
+ IF_OPER_UP transition afterwards.
+
+
+4. Setting from userspace
+
+Applications have to use the netlink interface to influence the
+RFC2863 operational state of an interface. Setting IFLA_LINKMODE to 1
+via RTM_SETLINK instructs the kernel that an interface should go to
+IF_OPER_DORMANT instead of IF_OPER_UP when the combination
+netif_carrier_ok() && !netif_dormant() is set by the
+driver. Afterwards, the userspace application can set IFLA_OPERSTATE
+to IF_OPER_DORMANT or IF_OPER_UP as long as the driver does not set
+netif_carrier_off() or netif_dormant_on(). Changes made by userspace
+are multicasted on the netlink group RTMGRP_LINK.
+
+So basically a 802.1X supplicant interacts with the kernel like this:
+
+-subscribe to RTMGRP_LINK
+-set IFLA_LINKMODE to 1 via RTM_SETLINK
+-query RTM_GETLINK once to get initial state
+-if initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait until
+ netlink multicast signals this state
+-do 802.1X, eventually abort if flags go down again
+-send RTM_SETLINK to set operstate to IF_OPER_UP if authentication
+ succeeds, IF_OPER_DORMANT otherwise
+-see how operstate and IFF_RUNNING is echoed via netlink multicast
+-set interface back to IF_OPER_DORMANT if 802.1X reauthentication
+ fails
+-restart if kernel changes IFF_LOWER_UP or IFF_DORMANT flag
+
+if supplicant goes down, bring back IFLA_LINKMODE to 0 and
+IFLA_OPERSTATE to a sane value.
+
+A routing daemon or dhcp client just needs to care for IFF_RUNNING or
+waiting for operstate to go IF_OPER_UP/IF_OPER_UNKNOWN before
+considering the interface / querying a DHCP address.
+
+
+For technical questions and/or comments please e-mail to Stefan Rompf
+(stefan at loplof.de).
diff --git a/trunk/Documentation/scsi/ChangeLog.megaraid b/trunk/Documentation/scsi/ChangeLog.megaraid
index c173806c91fa..09f6300eda4b 100644
--- a/trunk/Documentation/scsi/ChangeLog.megaraid
+++ b/trunk/Documentation/scsi/ChangeLog.megaraid
@@ -1,28 +1,3 @@
-Release Date : Mon Apr 11 12:27:22 EST 2006 - Seokmann Ju
-Current Version : 2.20.4.8 (scsi module), 2.20.2.6 (cmm module)
-Older Version : 2.20.4.7 (scsi module), 2.20.2.6 (cmm module)
-
-1. Fixed a bug in megaraid_reset_handler().
- Customer reported "Unable to handle kernel NULL pointer dereference
- at virtual address 00000000" when system goes to reset condition
- for some reason. It happened randomly.
- Root Cause: in the megaraid_reset_handler(), there is possibility not
- returning pending packets in the pend_list if there are multiple
- pending packets.
- Fix: Made the change in the driver so that it will return all packets
- in the pend_list.
-
-2. Added change request.
- As found in the following URL, rmb() only didn't help the
- problem.
I had to increase the loop counter to 0xFFFFFF. (6 F's) - http://marc.theaimsgroup.com/?l=linux-scsi&m=110971060502497&w=2 - - I attached a patch for your reference, too. - Could you check and get this fix in your driver? - - Best Regards, - Jun'ichi Nomura - Release Date : Fri Nov 11 12:27:22 EST 2005 - Seokmann Ju Current Version : 2.20.4.7 (scsi module), 2.20.2.6 (cmm module) Older Version : 2.20.4.6 (scsi module), 2.20.2.6 (cmm module) diff --git a/trunk/block/elevator.c b/trunk/block/elevator.c index 8768a367fdde..29825792cbd5 100644 --- a/trunk/block/elevator.c +++ b/trunk/block/elevator.c @@ -333,7 +333,6 @@ void elv_insert(request_queue_t *q, struct request *rq, int where) { struct list_head *pos; unsigned ordseq; - int unplug_it = 1; blk_add_trace_rq(q, rq, BLK_TA_INSERT); @@ -400,11 +399,6 @@ void elv_insert(request_queue_t *q, struct request *rq, int where) } list_add_tail(&rq->queuelist, pos); - /* - * most requeues happen because of a busy condition, don't - * force unplug of the queue for that case. - */ - unplug_it = 0; break; default: @@ -413,7 +407,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where) BUG(); } - if (unplug_it && blk_queue_plugged(q)) { + if (blk_queue_plugged(q)) { int nrq = q->rq.count[READ] + q->rq.count[WRITE] - q->in_flight; diff --git a/trunk/block/ll_rw_blk.c b/trunk/block/ll_rw_blk.c index eac48bec1479..e5041a02e21f 100644 --- a/trunk/block/ll_rw_blk.c +++ b/trunk/block/ll_rw_blk.c @@ -1732,21 +1732,8 @@ void blk_run_queue(struct request_queue *q) spin_lock_irqsave(q->queue_lock, flags); blk_remove_plug(q); - - /* - * Only recurse once to avoid overrunning the stack, let the unplug - * handling reinvoke the handler shortly if we already got there. - */ - if (!elv_queue_empty(q)) { - if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) { - q->request_fn(q); - clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags); - } else { - blk_plug_device(q); - kblockd_schedule_work(&q->unplug_work); - } - } - + if (!elv_queue_empty(q)) + q->request_fn(q); spin_unlock_irqrestore(q->queue_lock, flags); } EXPORT_SYMBOL(blk_run_queue); diff --git a/trunk/drivers/char/pcmcia/cm4000_cs.c b/trunk/drivers/char/pcmcia/cm4000_cs.c index 128b2632512d..02114a0bd0d9 100644 --- a/trunk/drivers/char/pcmcia/cm4000_cs.c +++ b/trunk/drivers/char/pcmcia/cm4000_cs.c @@ -1981,6 +1981,10 @@ static int __init cmm_init(void) if (!cmm_class) return -1; + rc = pcmcia_register_driver(&cm4000_driver); + if (rc < 0) + return rc; + major = register_chrdev(0, DEVICE_NAME, &cm4000_fops); if (major < 0) { printk(KERN_WARNING MODULE_NAME @@ -1988,12 +1992,6 @@ static int __init cmm_init(void) return -1; } - rc = pcmcia_register_driver(&cm4000_driver); - if (rc < 0) { - unregister_chrdev(major, DEVICE_NAME); - return rc; - } - return 0; } diff --git a/trunk/drivers/char/pcmcia/cm4040_cs.c b/trunk/drivers/char/pcmcia/cm4040_cs.c index 47a8465bf95b..29efa64580a8 100644 --- a/trunk/drivers/char/pcmcia/cm4040_cs.c +++ b/trunk/drivers/char/pcmcia/cm4040_cs.c @@ -724,19 +724,16 @@ static int __init cm4040_init(void) if (!cmx_class) return -1; + rc = pcmcia_register_driver(&reader_driver); + if (rc < 0) + return rc; + major = register_chrdev(0, DEVICE_NAME, &reader_fops); if (major < 0) { printk(KERN_WARNING MODULE_NAME ": could not get major number\n"); return -1; } - - rc = pcmcia_register_driver(&reader_driver); - if (rc < 0) { - unregister_chrdev(major, DEVICE_NAME); - return rc; - } - return 0; } diff --git a/trunk/drivers/infiniband/core/sysfs.c 
b/trunk/drivers/infiniband/core/sysfs.c index 21f9282c1b25..15121cb5a1f6 100644 --- a/trunk/drivers/infiniband/core/sysfs.c +++ b/trunk/drivers/infiniband/core/sysfs.c @@ -336,7 +336,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr, switch (width) { case 4: ret = sprintf(buf, "%u\n", (out_mad->data[40 + offset / 8] >> - (4 - (offset % 8))) & 0xf); + (offset % 4)) & 0xf); break; case 8: ret = sprintf(buf, "%u\n", out_mad->data[40 + offset / 8]); diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_cq.c b/trunk/drivers/infiniband/hw/mthca/mthca_cq.c index 205854e9c662..312cf90731ea 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_cq.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_cq.c @@ -238,9 +238,9 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn, spin_lock(&dev->cq_table.lock); cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); - if (cq) - ++cq->refcount; + if (cq) + atomic_inc(&cq->refcount); spin_unlock(&dev->cq_table.lock); if (!cq) { @@ -254,10 +254,8 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn, if (cq->ibcq.event_handler) cq->ibcq.event_handler(&event, cq->ibcq.cq_context); - spin_lock(&dev->cq_table.lock); - if (!--cq->refcount) + if (atomic_dec_and_test(&cq->refcount)) wake_up(&cq->wait); - spin_unlock(&dev->cq_table.lock); } static inline int is_recv_cqe(struct mthca_cqe *cqe) @@ -269,13 +267,23 @@ static inline int is_recv_cqe(struct mthca_cqe *cqe) return !(cqe->is_send & 0x80); } -void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, +void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, struct mthca_srq *srq) { + struct mthca_cq *cq; struct mthca_cqe *cqe; u32 prod_index; int nfreed = 0; + spin_lock_irq(&dev->cq_table.lock); + cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); + if (cq) + atomic_inc(&cq->refcount); + spin_unlock_irq(&dev->cq_table.lock); + + if (!cq) + return; + spin_lock_irq(&cq->lock); /* @@ -293,7 +301,7 @@ void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, if (0) mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n", - qpn, cq->cqn, cq->cons_index, prod_index); + qpn, cqn, cq->cons_index, prod_index); /* * Now sweep backwards through the CQ, removing CQ entries @@ -317,6 +325,8 @@ void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, } spin_unlock_irq(&cq->lock); + if (atomic_dec_and_test(&cq->refcount)) + wake_up(&cq->wait); } void mthca_cq_resize_copy_cqes(struct mthca_cq *cq) @@ -811,7 +821,7 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, } spin_lock_init(&cq->lock); - cq->refcount = 1; + atomic_set(&cq->refcount, 1); init_waitqueue_head(&cq->wait); memset(cq_context, 0, sizeof *cq_context); @@ -886,17 +896,6 @@ int mthca_init_cq(struct mthca_dev *dev, int nent, return err; } -static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq) -{ - int c; - - spin_lock_irq(&dev->cq_table.lock); - c = cq->refcount; - spin_unlock_irq(&dev->cq_table.lock); - - return c; -} - void mthca_free_cq(struct mthca_dev *dev, struct mthca_cq *cq) { @@ -930,7 +929,6 @@ void mthca_free_cq(struct mthca_dev *dev, spin_lock_irq(&dev->cq_table.lock); mthca_array_clear(&dev->cq_table.cq, cq->cqn & (dev->limits.num_cqs - 1)); - --cq->refcount; spin_unlock_irq(&dev->cq_table.lock); if (dev->mthca_flags & MTHCA_FLAG_MSI_X) @@ -938,7 +936,8 @@ void mthca_free_cq(struct mthca_dev *dev, else synchronize_irq(dev->pdev->irq); - wait_event(cq->wait, !get_cq_refcount(dev, cq)); + 
atomic_dec(&cq->refcount); + wait_event(cq->wait, !atomic_read(&cq->refcount)); if (cq->is_kernel) { mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_dev.h b/trunk/drivers/infiniband/hw/mthca/mthca_dev.h index f8160b8de090..4c1dcb4c1822 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_dev.h +++ b/trunk/drivers/infiniband/hw/mthca/mthca_dev.h @@ -496,7 +496,7 @@ void mthca_free_cq(struct mthca_dev *dev, void mthca_cq_completion(struct mthca_dev *dev, u32 cqn); void mthca_cq_event(struct mthca_dev *dev, u32 cqn, enum ib_event_type event_type); -void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, +void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, struct mthca_srq *srq); void mthca_cq_resize_copy_cqes(struct mthca_cq *cq); int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent); diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_mr.c b/trunk/drivers/infiniband/hw/mthca/mthca_mr.c index a486dec1707e..25e1c1db9a40 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_mr.c @@ -761,7 +761,6 @@ void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) int __devinit mthca_init_mr_table(struct mthca_dev *dev) { - unsigned long addr; int err, i; err = mthca_alloc_init(&dev->mr_table.mpt_alloc, @@ -797,12 +796,9 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev) goto err_fmr_mpt; } - addr = pci_resource_start(dev->pdev, 4) + - ((pci_resource_len(dev->pdev, 4) - 1) & - dev->mr_table.mpt_base); - dev->mr_table.tavor_fmr.mpt_base = - ioremap(addr, (1 << i) * sizeof(struct mthca_mpt_entry)); + ioremap(dev->mr_table.mpt_base, + (1 << i) * sizeof (struct mthca_mpt_entry)); if (!dev->mr_table.tavor_fmr.mpt_base) { mthca_warn(dev, "MPT ioremap for FMR failed.\n"); @@ -810,12 +806,9 @@ int __devinit mthca_init_mr_table(struct mthca_dev *dev) goto err_fmr_mpt; } - addr = pci_resource_start(dev->pdev, 4) + - ((pci_resource_len(dev->pdev, 4) - 1) & - dev->mr_table.mtt_base); - dev->mr_table.tavor_fmr.mtt_base = - ioremap(addr, (1 << i) * MTHCA_MTT_SEG_SIZE); + ioremap(dev->mr_table.mtt_base, + (1 << i) * MTHCA_MTT_SEG_SIZE); if (!dev->mr_table.tavor_fmr.mtt_base) { mthca_warn(dev, "MTT ioremap for FMR failed.\n"); err = -ENOMEM; diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_provider.h b/trunk/drivers/infiniband/hw/mthca/mthca_provider.h index 179a8f610d0f..6676a786d690 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_provider.h +++ b/trunk/drivers/infiniband/hw/mthca/mthca_provider.h @@ -139,12 +139,11 @@ struct mthca_ah { * a qp may be locked, with the send cq locked first. No other * nesting should be done. * - * Each struct mthca_cq/qp also has an ref count, protected by the - * corresponding table lock. The pointer from the cq/qp_table to the - * struct counts as one reference. This reference also is good for - * access through the consumer API, so modifying the CQ/QP etc doesn't - * need to take another reference. Access to a QP because of a - * completion being polled does not need a reference either. + * Each struct mthca_cq/qp also has an atomic_t ref count. The + * pointer from the cq/qp_table to the struct counts as one reference. + * This reference also is good for access through the consumer API, so + * modifying the CQ/QP etc doesn't need to take another reference. + * Access because of a completion being polled does need a reference. 
* * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the * destroy function to sleep on. @@ -160,9 +159,8 @@ struct mthca_ah { * - decrement ref count; if zero, wake up waiters * * To destroy a CQ/QP, we can do the following: - * - lock cq/qp_table - * - remove pointer and decrement ref count - * - unlock cq/qp_table lock + * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock + * - decrement ref count * - wait_event until ref count is zero * * It is the consumer's responsibilty to make sure that no QP @@ -199,7 +197,7 @@ struct mthca_cq_resize { struct mthca_cq { struct ib_cq ibcq; spinlock_t lock; - int refcount; + atomic_t refcount; int cqn; u32 cons_index; struct mthca_cq_buf buf; @@ -219,7 +217,7 @@ struct mthca_cq { struct mthca_srq { struct ib_srq ibsrq; spinlock_t lock; - int refcount; + atomic_t refcount; int srqn; int max; int max_gs; @@ -256,7 +254,7 @@ struct mthca_wq { struct mthca_qp { struct ib_qp ibqp; - int refcount; + atomic_t refcount; u32 qpn; int is_direct; u8 port; /* for SQP and memfree use only */ diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_qp.c b/trunk/drivers/infiniband/hw/mthca/mthca_qp.c index 19765f6f8d58..f37b0e367323 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_qp.c @@ -240,7 +240,7 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn, spin_lock(&dev->qp_table.lock); qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); if (qp) - ++qp->refcount; + atomic_inc(&qp->refcount); spin_unlock(&dev->qp_table.lock); if (!qp) { @@ -257,10 +257,8 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn, if (qp->ibqp.event_handler) qp->ibqp.event_handler(&event, qp->ibqp.qp_context); - spin_lock(&dev->qp_table.lock); - if (!--qp->refcount) + if (atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait); - spin_unlock(&dev->qp_table.lock); } static int to_mthca_state(enum ib_qp_state ib_state) @@ -835,10 +833,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask) * entries and reinitialize the QP. */ if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { - mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, + mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); if (qp->ibqp.send_cq != qp->ibqp.recv_cq) - mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, + mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, qp->ibqp.srq ? 
to_msrq(qp->ibqp.srq) : NULL); mthca_wq_init(&qp->sq); @@ -1098,7 +1096,7 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev, int ret; int i; - qp->refcount = 1; + atomic_set(&qp->refcount, 1); init_waitqueue_head(&qp->wait); qp->state = IB_QPS_RESET; qp->atomic_rd_en = 0; @@ -1320,17 +1318,6 @@ int mthca_alloc_sqp(struct mthca_dev *dev, return err; } -static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) -{ - int c; - - spin_lock_irq(&dev->qp_table.lock); - c = qp->refcount; - spin_unlock_irq(&dev->qp_table.lock); - - return c; -} - void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp) { @@ -1352,14 +1339,14 @@ void mthca_free_qp(struct mthca_dev *dev, spin_lock(&dev->qp_table.lock); mthca_array_clear(&dev->qp_table.qp, qp->qpn & (dev->limits.num_qps - 1)); - --qp->refcount; spin_unlock(&dev->qp_table.lock); if (send_cq != recv_cq) spin_unlock(&recv_cq->lock); spin_unlock_irq(&send_cq->lock); - wait_event(qp->wait, !get_qp_refcount(dev, qp)); + atomic_dec(&qp->refcount); + wait_event(qp->wait, !atomic_read(&qp->refcount)); if (qp->state != IB_QPS_RESET) mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, @@ -1371,10 +1358,10 @@ void mthca_free_qp(struct mthca_dev *dev, * unref the mem-free tables and free the QPN in our table. */ if (!qp->ibqp.uobject) { - mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, + mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); if (qp->ibqp.send_cq != qp->ibqp.recv_cq) - mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, + mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); mthca_free_memfree(dev, qp); diff --git a/trunk/drivers/infiniband/hw/mthca/mthca_srq.c b/trunk/drivers/infiniband/hw/mthca/mthca_srq.c index 1ea433291fa7..adcaf85355ae 100644 --- a/trunk/drivers/infiniband/hw/mthca/mthca_srq.c +++ b/trunk/drivers/infiniband/hw/mthca/mthca_srq.c @@ -241,7 +241,7 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, goto err_out_mailbox; spin_lock_init(&srq->lock); - srq->refcount = 1; + atomic_set(&srq->refcount, 1); init_waitqueue_head(&srq->wait); if (mthca_is_memfree(dev)) @@ -308,17 +308,6 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, return err; } -static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq) -{ - int c; - - spin_lock_irq(&dev->srq_table.lock); - c = srq->refcount; - spin_unlock_irq(&dev->srq_table.lock); - - return c; -} - void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) { struct mthca_mailbox *mailbox; @@ -340,10 +329,10 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) spin_lock_irq(&dev->srq_table.lock); mthca_array_clear(&dev->srq_table.srq, srq->srqn & (dev->limits.num_srqs - 1)); - --srq->refcount; spin_unlock_irq(&dev->srq_table.lock); - wait_event(srq->wait, !get_srq_refcount(dev, srq)); + atomic_dec(&srq->refcount); + wait_event(srq->wait, !atomic_read(&srq->refcount)); if (!srq->ibsrq.uobject) { mthca_free_srq_buf(dev, srq); @@ -425,7 +414,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn, spin_lock(&dev->srq_table.lock); srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); if (srq) - ++srq->refcount; + atomic_inc(&srq->refcount); spin_unlock(&dev->srq_table.lock); if (!srq) { @@ -442,10 +431,8 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn, srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); out: - 
spin_lock(&dev->srq_table.lock); - if (!--srq->refcount) + if (atomic_dec_and_test(&srq->refcount)) wake_up(&srq->wait); - spin_unlock(&dev->srq_table.lock); } /* diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index f887780e8093..4ca175553f9f 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -158,8 +158,10 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) if (priv->pkey == pkey) { unregister_netdev(priv->dev); ipoib_dev_cleanup(priv->dev); + list_del(&priv->list); - free_netdev(priv->dev); + + kfree(priv); ret = 0; break; diff --git a/trunk/drivers/infiniband/ulp/srp/ib_srp.c b/trunk/drivers/infiniband/ulp/srp/ib_srp.c index c32ce4348e1b..5bb55742ada6 100644 --- a/trunk/drivers/infiniband/ulp/srp/ib_srp.c +++ b/trunk/drivers/infiniband/ulp/srp/ib_srp.c @@ -409,34 +409,6 @@ static int srp_connect_target(struct srp_target_port *target) } } -static void srp_unmap_data(struct scsi_cmnd *scmnd, - struct srp_target_port *target, - struct srp_request *req) -{ - struct scatterlist *scat; - int nents; - - if (!scmnd->request_buffer || - (scmnd->sc_data_direction != DMA_TO_DEVICE && - scmnd->sc_data_direction != DMA_FROM_DEVICE)) - return; - - /* - * This handling of non-SG commands can be killed when the - * SCSI midlayer no longer generates non-SG commands. - */ - if (likely(scmnd->use_sg)) { - nents = scmnd->use_sg; - scat = scmnd->request_buffer; - } else { - nents = 1; - scat = &req->fake_sg; - } - - dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents, - scmnd->sc_data_direction); -} - static int srp_reconnect_target(struct srp_target_port *target) { struct ib_cm_id *new_cm_id; @@ -483,16 +455,16 @@ static int srp_reconnect_target(struct srp_target_port *target) list_for_each_entry(req, &target->req_queue, list) { req->scmnd->result = DID_RESET << 16; req->scmnd->scsi_done(req->scmnd); - srp_unmap_data(req->scmnd, target, req); } target->rx_head = 0; target->tx_head = 0; target->tx_tail = 0; - INIT_LIST_HEAD(&target->free_reqs); + target->req_head = 0; + for (i = 0; i < SRP_SQ_SIZE - 1; ++i) + target->req_ring[i].next = i + 1; + target->req_ring[SRP_SQ_SIZE - 1].next = -1; INIT_LIST_HEAD(&target->req_queue); - for (i = 0; i < SRP_SQ_SIZE; ++i) - list_add_tail(&target->req_ring[i].list, &target->free_reqs); ret = srp_connect_target(target); if (ret) @@ -617,10 +589,40 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, return len; } -static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) +static void srp_unmap_data(struct scsi_cmnd *scmnd, + struct srp_target_port *target, + struct srp_request *req) +{ + struct scatterlist *scat; + int nents; + + if (!scmnd->request_buffer || + (scmnd->sc_data_direction != DMA_TO_DEVICE && + scmnd->sc_data_direction != DMA_FROM_DEVICE)) + return; + + /* + * This handling of non-SG commands can be killed when the + * SCSI midlayer no longer generates non-SG commands. 
+ */ + if (likely(scmnd->use_sg)) { + nents = scmnd->use_sg; + scat = scmnd->request_buffer; + } else { + nents = 1; + scat = &req->fake_sg; + } + + dma_unmap_sg(target->srp_host->dev->dma_device, scat, nents, + scmnd->sc_data_direction); +} + +static void srp_remove_req(struct srp_target_port *target, struct srp_request *req, + int index) { - srp_unmap_data(req->scmnd, target, req); - list_move_tail(&req->list, &target->free_reqs); + list_del(&req->list); + req->next = target->req_head; + target->req_head = index; } static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) @@ -645,7 +647,7 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) req->tsk_status = rsp->data[3]; complete(&req->done); } else { - scmnd = req->scmnd; + scmnd = req->scmnd; if (!scmnd) printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n", (unsigned long long) rsp->tag); @@ -663,11 +665,14 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt); + srp_unmap_data(scmnd, target, req); + if (!req->tsk_mgmt) { + req->scmnd = NULL; scmnd->host_scribble = (void *) -1L; scmnd->scsi_done(scmnd); - srp_remove_req(target, req); + srp_remove_req(target, req, rsp->tag & ~SRP_TAG_TSK_MGMT); } else req->cmd_done = 1; } @@ -854,6 +859,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, struct srp_request *req; struct srp_iu *iu; struct srp_cmd *cmd; + long req_index; int len; if (target->state == SRP_TARGET_CONNECTING) @@ -873,20 +879,22 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, dma_sync_single_for_cpu(target->srp_host->dev->dma_device, iu->dma, SRP_MAX_IU_LEN, DMA_TO_DEVICE); - req = list_entry(target->free_reqs.next, struct srp_request, list); + req_index = target->req_head; scmnd->scsi_done = done; scmnd->result = 0; - scmnd->host_scribble = (void *) (long) req->index; + scmnd->host_scribble = (void *) req_index; cmd = iu->buf; memset(cmd, 0, sizeof *cmd); cmd->opcode = SRP_CMD; cmd->lun = cpu_to_be64((u64) scmnd->device->lun << 48); - cmd->tag = req->index; + cmd->tag = req_index; memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); + req = &target->req_ring[req_index]; + req->scmnd = scmnd; req->cmd = iu; req->cmd_done = 0; @@ -911,7 +919,8 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, goto err_unmap; } - list_move_tail(&req->list, &target->req_queue); + target->req_head = req->next; + list_add_tail(&req->list, &target->req_queue); return 0; @@ -1134,20 +1143,30 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) return 0; } -static int srp_send_tsk_mgmt(struct srp_target_port *target, - struct srp_request *req, u8 func) +static int srp_send_tsk_mgmt(struct scsi_cmnd *scmnd, u8 func) { + struct srp_target_port *target = host_to_target(scmnd->device->host); + struct srp_request *req; struct srp_iu *iu; struct srp_tsk_mgmt *tsk_mgmt; + int req_index; + int ret = FAILED; spin_lock_irq(target->scsi_host->host_lock); if (target->state == SRP_TARGET_DEAD || target->state == SRP_TARGET_REMOVED) { - req->scmnd->result = DID_BAD_TARGET << 16; + scmnd->result = DID_BAD_TARGET << 16; goto out; } + if (scmnd->host_scribble == (void *) -1L) + goto out; + + req_index = (long) scmnd->host_scribble; + printk(KERN_ERR "Abort for req_index %d\n", req_index); + + req = &target->req_ring[req_index]; init_completion(&req->done); iu = __srp_get_tx_iu(target); @@ -1158,10 +1177,10 @@ static int 
srp_send_tsk_mgmt(struct srp_target_port *target, memset(tsk_mgmt, 0, sizeof *tsk_mgmt); tsk_mgmt->opcode = SRP_TSK_MGMT; - tsk_mgmt->lun = cpu_to_be64((u64) req->scmnd->device->lun << 48); - tsk_mgmt->tag = req->index | SRP_TAG_TSK_MGMT; + tsk_mgmt->lun = cpu_to_be64((u64) scmnd->device->lun << 48); + tsk_mgmt->tag = req_index | SRP_TAG_TSK_MGMT; tsk_mgmt->tsk_mgmt_func = func; - tsk_mgmt->task_tag = req->index; + tsk_mgmt->task_tag = req_index; if (__srp_post_send(target, iu, sizeof *tsk_mgmt)) goto out; @@ -1169,85 +1188,37 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target, req->tsk_mgmt = iu; spin_unlock_irq(target->scsi_host->host_lock); - if (!wait_for_completion_timeout(&req->done, msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS))) - return -1; - - return 0; - -out: - spin_unlock_irq(target->scsi_host->host_lock); - return -1; -} - -static int srp_find_req(struct srp_target_port *target, - struct scsi_cmnd *scmnd, - struct srp_request **req) -{ - if (scmnd->host_scribble == (void *) -1L) - return -1; - - *req = &target->req_ring[(long) scmnd->host_scribble]; - - return 0; -} - -static int srp_abort(struct scsi_cmnd *scmnd) -{ - struct srp_target_port *target = host_to_target(scmnd->device->host); - struct srp_request *req; - int ret = SUCCESS; - - printk(KERN_ERR "SRP abort called\n"); - - if (srp_find_req(target, scmnd, &req)) - return FAILED; - if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK)) return FAILED; - spin_lock_irq(target->scsi_host->host_lock); if (req->cmd_done) { - srp_remove_req(target, req); + srp_remove_req(target, req, req_index); scmnd->scsi_done(scmnd); } else if (!req->tsk_status) { - srp_remove_req(target, req); + srp_remove_req(target, req, req_index); scmnd->result = DID_ABORT << 16; - } else - ret = FAILED; + ret = SUCCESS; + } +out: spin_unlock_irq(target->scsi_host->host_lock); - return ret; } -static int srp_reset_device(struct scsi_cmnd *scmnd) +static int srp_abort(struct scsi_cmnd *scmnd) { - struct srp_target_port *target = host_to_target(scmnd->device->host); - struct srp_request *req, *tmp; - - printk(KERN_ERR "SRP reset_device called\n"); - - if (srp_find_req(target, scmnd, &req)) - return FAILED; - if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET)) - return FAILED; - if (req->tsk_status) - return FAILED; - - spin_lock_irq(target->scsi_host->host_lock); + printk(KERN_ERR "SRP abort called\n"); - list_for_each_entry_safe(req, tmp, &target->req_queue, list) - if (req->scmnd->device == scmnd->device) { - req->scmnd->result = DID_RESET << 16; - scmnd->scsi_done(scmnd); - srp_remove_req(target, req); - } + return srp_send_tsk_mgmt(scmnd, SRP_TSK_ABORT_TASK); +} - spin_unlock_irq(target->scsi_host->host_lock); +static int srp_reset_device(struct scsi_cmnd *scmnd) +{ + printk(KERN_ERR "SRP reset_device called\n"); - return SUCCESS; + return srp_send_tsk_mgmt(scmnd, SRP_TSK_LUN_RESET); } static int srp_reset_host(struct scsi_cmnd *scmnd) @@ -1547,12 +1518,10 @@ static ssize_t srp_create_target(struct class_device *class_dev, INIT_WORK(&target->work, srp_reconnect_work, target); - INIT_LIST_HEAD(&target->free_reqs); + for (i = 0; i < SRP_SQ_SIZE - 1; ++i) + target->req_ring[i].next = i + 1; + target->req_ring[SRP_SQ_SIZE - 1].next = -1; INIT_LIST_HEAD(&target->req_queue); - for (i = 0; i < SRP_SQ_SIZE; ++i) { - target->req_ring[i].index = i; - list_add_tail(&target->req_ring[i].list, &target->free_reqs); - } ret = srp_parse_options(buf, target); if (ret) diff --git a/trunk/drivers/infiniband/ulp/srp/ib_srp.h 
b/trunk/drivers/infiniband/ulp/srp/ib_srp.h index c5cd43aae860..bd7f7c3115de 100644 --- a/trunk/drivers/infiniband/ulp/srp/ib_srp.h +++ b/trunk/drivers/infiniband/ulp/srp/ib_srp.h @@ -101,7 +101,7 @@ struct srp_request { */ struct scatterlist fake_sg; struct completion done; - short index; + short next; u8 cmd_done; u8 tsk_status; }; @@ -133,7 +133,7 @@ struct srp_target_port { unsigned tx_tail; struct srp_iu *tx_ring[SRP_SQ_SIZE + 1]; - struct list_head free_reqs; + int req_head; struct list_head req_queue; struct srp_request req_ring[SRP_SQ_SIZE]; diff --git a/trunk/drivers/message/fusion/mptbase.c b/trunk/drivers/message/fusion/mptbase.c index 9080853fe283..266414ca2814 100644 --- a/trunk/drivers/message/fusion/mptbase.c +++ b/trunk/drivers/message/fusion/mptbase.c @@ -1189,6 +1189,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) ioc->diagPending = 0; spin_lock_init(&ioc->diagLock); spin_lock_init(&ioc->fc_rescan_work_lock); + spin_lock_init(&ioc->fc_rport_lock); spin_lock_init(&ioc->initializing_hba_lock); /* Initialize the event logging. @@ -5735,13 +5736,11 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) return rc; } -# define EVENT_DESCR_STR_SZ 100 - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static void EventDescriptionStr(u8 event, u32 evData0, char *evStr) { - char *ds = NULL; + char *ds; switch(event) { case MPI_EVENT_NONE: @@ -5778,9 +5777,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP) ds = "Loop State(LIP) Change"; else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE) - ds = "Loop State(LPE) Change"; /* ??? */ + ds = "Loop State(LPE) Change"; /* ??? */ else - ds = "Loop State(LPB) Change"; /* ??? */ + ds = "Loop State(LPB) Change"; /* ??? 
*/ break; case MPI_EVENT_LOGOUT: ds = "Logout"; @@ -5842,32 +5841,27 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) break; case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: { + char buf[50]; u8 id = (u8)(evData0); u8 ReasonCode = (u8)(evData0 >> 16); switch (ReasonCode) { case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "SAS Device Status Change: Added: id=%d", id); + sprintf(buf,"SAS Device Status Change: Added: id=%d", id); break; case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "SAS Device Status Change: Deleted: id=%d", id); + sprintf(buf,"SAS Device Status Change: Deleted: id=%d", id); break; case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "SAS Device Status Change: SMART Data: id=%d", - id); + sprintf(buf,"SAS Device Status Change: SMART Data: id=%d", id); break; case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "SAS Device Status Change: No Persistancy " - "Added: id=%d", id); + sprintf(buf,"SAS Device Status Change: No Persistancy Added: id=%d", id); break; default: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "SAS Device Status Change: Unknown: id=%d", id); - break; + sprintf(buf,"SAS Device Status Change: Unknown: id=%d", id); + break; } + ds = buf; break; } case MPI_EVENT_ON_BUS_TIMER_EXPIRED: @@ -5884,46 +5878,41 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) break; case MPI_EVENT_SAS_PHY_LINK_STATUS: { + char buf[50]; u8 LinkRates = (u8)(evData0 >> 8); u8 PhyNumber = (u8)(evData0); LinkRates = (LinkRates & MPI_EVENT_SAS_PLS_LR_CURRENT_MASK) >> MPI_EVENT_SAS_PLS_LR_CURRENT_SHIFT; switch (LinkRates) { case MPI_EVENT_SAS_PLS_LR_RATE_UNKNOWN: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "SAS PHY Link Status: Phy=%d:" + sprintf(buf,"SAS PHY Link Status: Phy=%d:" " Rate Unknown",PhyNumber); break; case MPI_EVENT_SAS_PLS_LR_RATE_PHY_DISABLED: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "SAS PHY Link Status: Phy=%d:" + sprintf(buf,"SAS PHY Link Status: Phy=%d:" " Phy Disabled",PhyNumber); break; case MPI_EVENT_SAS_PLS_LR_RATE_FAILED_SPEED_NEGOTIATION: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "SAS PHY Link Status: Phy=%d:" + sprintf(buf,"SAS PHY Link Status: Phy=%d:" " Failed Speed Nego",PhyNumber); break; case MPI_EVENT_SAS_PLS_LR_RATE_SATA_OOB_COMPLETE: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "SAS PHY Link Status: Phy=%d:" + sprintf(buf,"SAS PHY Link Status: Phy=%d:" " Sata OOB Completed",PhyNumber); break; case MPI_EVENT_SAS_PLS_LR_RATE_1_5: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "SAS PHY Link Status: Phy=%d:" + sprintf(buf,"SAS PHY Link Status: Phy=%d:" " Rate 1.5 Gbps",PhyNumber); break; case MPI_EVENT_SAS_PLS_LR_RATE_3_0: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "SAS PHY Link Status: Phy=%d:" + sprintf(buf,"SAS PHY Link Status: Phy=%d:" " Rate 3.0 Gpbs",PhyNumber); break; default: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "SAS PHY Link Status: Phy=%d", PhyNumber); + sprintf(buf,"SAS PHY Link Status: Phy=%d", PhyNumber); break; } + ds = buf; break; } case MPI_EVENT_SAS_DISCOVERY_ERROR: @@ -5932,8 +5921,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) case MPI_EVENT_IR_RESYNC_UPDATE: { u8 resync_complete = (u8)(evData0 >> 16); - snprintf(evStr, EVENT_DESCR_STR_SZ, - "IR Resync Update: Complete = %d:",resync_complete); + char buf[40]; + sprintf(buf,"IR Resync Update: Complete = %d:",resync_complete); + ds = buf; break; } case MPI_EVENT_IR2: @@ -5986,8 +5976,7 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr) ds = "Unknown"; 
break; } - if (ds) - strncpy(evStr, ds, EVENT_DESCR_STR_SZ); + strcpy(evStr,ds); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -6009,7 +5998,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply int ii; int r = 0; int handlers = 0; - char evStr[EVENT_DESCR_STR_SZ]; + char evStr[100]; u8 event; /* diff --git a/trunk/drivers/message/fusion/mptbase.h b/trunk/drivers/message/fusion/mptbase.h index f673cca507e1..be7e8501b53c 100644 --- a/trunk/drivers/message/fusion/mptbase.h +++ b/trunk/drivers/message/fusion/mptbase.h @@ -76,8 +76,8 @@ #define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR #endif -#define MPT_LINUX_VERSION_COMMON "3.03.09" -#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.09" +#define MPT_LINUX_VERSION_COMMON "3.03.08" +#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.03.08" #define WHAT_MAGIC_STRING "@" "(" "#" ")" #define show_mptmod_ver(s,ver) \ @@ -489,6 +489,7 @@ typedef struct _RaidCfgData { #define MPT_RPORT_INFO_FLAGS_REGISTERED 0x01 /* rport registered */ #define MPT_RPORT_INFO_FLAGS_MISSING 0x02 /* missing from DevPage0 scan */ +#define MPT_RPORT_INFO_FLAGS_MAPPED_VDEV 0x04 /* target mapped in vdev */ /* * data allocated for each fc rport device @@ -500,6 +501,7 @@ struct mptfc_rport_info struct scsi_target *starget; FCDevicePage0_t pg0; u8 flags; + u8 remap_needed; }; /* @@ -626,11 +628,11 @@ typedef struct _MPT_ADAPTER struct work_struct mptscsih_persistTask; struct list_head fc_rports; + spinlock_t fc_rport_lock; /* list and ri flags */ spinlock_t fc_rescan_work_lock; int fc_rescan_work_count; struct work_struct fc_rescan_work; - char fc_rescan_work_q_name[KOBJ_NAME_LEN]; - struct workqueue_struct *fc_rescan_work_q; + } MPT_ADAPTER; /* diff --git a/trunk/drivers/message/fusion/mptfc.c b/trunk/drivers/message/fusion/mptfc.c index 856487741ef4..b343f2a68b1c 100644 --- a/trunk/drivers/message/fusion/mptfc.c +++ b/trunk/drivers/message/fusion/mptfc.c @@ -341,6 +341,9 @@ mptfc_generate_rport_ids(FCDevicePage0_t *pg0, struct fc_rport_identifiers *rid) rid->port_name = ((u64)pg0->WWPN.High) << 32 | (u64)pg0->WWPN.Low; rid->port_id = pg0->PortIdentifier; rid->roles = FC_RPORT_ROLE_UNKNOWN; + rid->roles |= FC_RPORT_ROLE_FCP_TARGET; + if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR) + rid->roles |= FC_RPORT_ROLE_FCP_INITIATOR; return 0; } @@ -352,18 +355,15 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0) struct fc_rport *rport; struct mptfc_rport_info *ri; int new_ri = 1; - u64 pn, nn; + u64 pn; + unsigned long flags; VirtTarget *vtarget; - u32 roles = FC_RPORT_ROLE_UNKNOWN; if (mptfc_generate_rport_ids(pg0, &rport_ids) < 0) return; - roles |= FC_RPORT_ROLE_FCP_TARGET; - if (pg0->Protocol & MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR) - roles |= FC_RPORT_ROLE_FCP_INITIATOR; - /* scan list looking for a match */ + spin_lock_irqsave(&ioc->fc_rport_lock, flags); list_for_each_entry(ri, &ioc->fc_rports, list) { pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; if (pn == rport_ids.port_name) { /* match */ @@ -373,9 +373,11 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0) } } if (new_ri) { /* allocate one */ + spin_unlock_irqrestore(&ioc->fc_rport_lock, flags); ri = kzalloc(sizeof(struct mptfc_rport_info), GFP_KERNEL); if (!ri) return; + spin_lock_irqsave(&ioc->fc_rport_lock, flags); list_add_tail(&ri->list, &ioc->fc_rports); } @@ -385,11 +387,14 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0) /* 
MPT_RPORT_INFO_FLAGS_REGISTERED - rport not previously deleted */ if (!(ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED)) { ri->flags |= MPT_RPORT_INFO_FLAGS_REGISTERED; + spin_unlock_irqrestore(&ioc->fc_rport_lock, flags); rport = fc_remote_port_add(ioc->sh, channel, &rport_ids); + spin_lock_irqsave(&ioc->fc_rport_lock, flags); if (rport) { ri->rport = rport; if (new_ri) /* may have been reset by user */ rport->dev_loss_tmo = mptfc_dev_loss_tmo; + *((struct mptfc_rport_info **)rport->dd_data) = ri; /* * if already mapped, remap here. If not mapped, * target_alloc will allocate vtarget and map, @@ -401,21 +406,16 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0) vtarget->target_id = pg0->CurrentTargetID; vtarget->bus_id = pg0->CurrentBus; } + ri->remap_needed = 0; } - *((struct mptfc_rport_info **)rport->dd_data) = ri; - /* scan will be scheduled once rport becomes a target */ - fc_remote_port_rolechg(rport,roles); - - pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; - nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low; dfcprintk ((MYIOC_s_INFO_FMT "mptfc_reg_dev.%d: %x, %llx / %llx, tid %d, " "rport tid %d, tmo %d\n", ioc->name, ioc->sh->host_no, pg0->PortIdentifier, - (unsigned long long)nn, - (unsigned long long)pn, + pg0->WWNN, + pg0->WWPN, pg0->CurrentTargetID, ri->rport->scsi_target_id, ri->rport->dev_loss_tmo)); @@ -425,6 +425,8 @@ mptfc_register_dev(MPT_ADAPTER *ioc, int channel, FCDevicePage0_t *pg0) ri = NULL; } } + spin_unlock_irqrestore(&ioc->fc_rport_lock,flags); + } /* @@ -474,6 +476,7 @@ mptfc_target_alloc(struct scsi_target *starget) vtarget->target_id = ri->pg0.CurrentTargetID; vtarget->bus_id = ri->pg0.CurrentBus; ri->starget = starget; + ri->remap_needed = 0; rc = 0; } } @@ -499,10 +502,10 @@ mptfc_slave_alloc(struct scsi_device *sdev) VirtDevice *vdev; struct scsi_target *starget; struct fc_rport *rport; + unsigned long flags; - starget = scsi_target(sdev); - rport = starget_to_rport(starget); + rport = starget_to_rport(scsi_target(sdev)); if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; @@ -516,8 +519,10 @@ mptfc_slave_alloc(struct scsi_device *sdev) return -ENOMEM; } + spin_lock_irqsave(&hd->ioc->fc_rport_lock,flags); sdev->hostdata = vdev; + starget = scsi_target(sdev); vtarget = starget->hostdata; if (vtarget->num_luns == 0) { @@ -530,16 +535,14 @@ mptfc_slave_alloc(struct scsi_device *sdev) vdev->vtarget = vtarget; vdev->lun = sdev->lun; - vtarget->num_luns++; + spin_unlock_irqrestore(&hd->ioc->fc_rport_lock,flags); + vtarget->num_luns++; #ifdef DMPT_DEBUG_FC - { - u64 nn, pn; + { struct mptfc_rport_info *ri; ri = *((struct mptfc_rport_info **)rport->dd_data); - pn = (u64)ri->pg0.WWPN.High << 32 | (u64)ri->pg0.WWPN.Low; - nn = (u64)ri->pg0.WWNN.High << 32 | (u64)ri->pg0.WWNN.Low; dfcprintk ((MYIOC_s_INFO_FMT "mptfc_slv_alloc.%d: num_luns %d, sdev.id %d, " "CurrentTargetID %d, %x %llx %llx\n", @@ -547,9 +550,7 @@ mptfc_slave_alloc(struct scsi_device *sdev) sdev->host->host_no, vtarget->num_luns, sdev->id, ri->pg0.CurrentTargetID, - ri->pg0.PortIdentifier, - (unsigned long long)pn, - (unsigned long long)nn)); + ri->pg0.PortIdentifier, ri->pg0.WWPN, ri->pg0.WWNN)); } #endif @@ -569,31 +570,11 @@ mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) done(SCpnt); return 0; } - - /* dd_data is null until finished adding target */ ri = *((struct mptfc_rport_info **)rport->dd_data); - if (unlikely(!ri)) { - dfcprintk ((MYIOC_s_INFO_FMT - "mptfc_qcmd.%d: %d:%d, dd_data is null.\n", - ((MPT_SCSI_HOST *) 
SCpnt->device->host->hostdata)->ioc->name, - ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->sh->host_no, - SCpnt->device->id,SCpnt->device->lun)); - SCpnt->result = DID_IMM_RETRY << 16; - done(SCpnt); - return 0; - } + if (unlikely(ri->remap_needed)) + return SCSI_MLQUEUE_HOST_BUSY; - err = mptscsih_qcmd(SCpnt,done); -#ifdef DMPT_DEBUG_FC - if (unlikely(err)) { - dfcprintk ((MYIOC_s_INFO_FMT - "mptfc_qcmd.%d: %d:%d, mptscsih_qcmd returns non-zero.\n", - ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->name, - ((MPT_SCSI_HOST *) SCpnt->device->host->hostdata)->ioc->sh->host_no, - SCpnt->device->id,SCpnt->device->lun)); - } -#endif - return err; + return mptscsih_qcmd(SCpnt,done); } static void @@ -634,17 +615,18 @@ mptfc_rescan_devices(void *arg) MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg; int ii; int work_to_do; - u64 pn; unsigned long flags; struct mptfc_rport_info *ri; do { /* start by tagging all ports as missing */ + spin_lock_irqsave(&ioc->fc_rport_lock,flags); list_for_each_entry(ri, &ioc->fc_rports, list) { if (ri->flags & MPT_RPORT_INFO_FLAGS_REGISTERED) { ri->flags |= MPT_RPORT_INFO_FLAGS_MISSING; } } + spin_unlock_irqrestore(&ioc->fc_rport_lock,flags); /* * now rescan devices known to adapter, @@ -657,24 +639,33 @@ mptfc_rescan_devices(void *arg) } /* delete devices still missing */ + spin_lock_irqsave(&ioc->fc_rport_lock, flags); list_for_each_entry(ri, &ioc->fc_rports, list) { /* if newly missing, delete it */ - if (ri->flags & MPT_RPORT_INFO_FLAGS_MISSING) { + if ((ri->flags & (MPT_RPORT_INFO_FLAGS_REGISTERED | + MPT_RPORT_INFO_FLAGS_MISSING)) + == (MPT_RPORT_INFO_FLAGS_REGISTERED | + MPT_RPORT_INFO_FLAGS_MISSING)) { ri->flags &= ~(MPT_RPORT_INFO_FLAGS_REGISTERED| MPT_RPORT_INFO_FLAGS_MISSING); - fc_remote_port_delete(ri->rport); /* won't sleep */ + ri->remap_needed = 1; + fc_remote_port_delete(ri->rport); + /* + * remote port not really deleted 'cause + * binding is by WWPN and driver only + * registers FCP_TARGETs but cannot trust + * data structures. 
+ */ ri->rport = NULL; - - pn = (u64)ri->pg0.WWPN.High << 32 | - (u64)ri->pg0.WWPN.Low; dfcprintk ((MYIOC_s_INFO_FMT "mptfc_rescan.%d: %llx deleted\n", ioc->name, ioc->sh->host_no, - (unsigned long long)pn)); + ri->pg0.WWPN)); } } + spin_unlock_irqrestore(&ioc->fc_rport_lock,flags); /* * allow multiple passes as target state @@ -879,23 +870,10 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out_mptfc_probe; } - /* initialize workqueue */ - - snprintf(ioc->fc_rescan_work_q_name, KOBJ_NAME_LEN, "mptfc_wq_%d", - sh->host_no); - ioc->fc_rescan_work_q = - create_singlethread_workqueue(ioc->fc_rescan_work_q_name); - if (!ioc->fc_rescan_work_q) - goto out_mptfc_probe; - - /* - * scan for rports - - * by doing it via the workqueue, some locking is eliminated - */ - - ioc->fc_rescan_work_count = 1; - queue_work(ioc->fc_rescan_work_q, &ioc->fc_rescan_work); - flush_workqueue(ioc->fc_rescan_work_q); + for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) { + mptfc_init_host_attr(ioc,ii); + mptfc_GetFcDevPage0(ioc,ii,mptfc_register_dev); + } return 0; @@ -971,18 +949,8 @@ mptfc_init(void) static void __devexit mptfc_remove(struct pci_dev *pdev) { - MPT_ADAPTER *ioc = pci_get_drvdata(pdev); - struct mptfc_rport_info *p, *n; - struct workqueue_struct *work_q; - unsigned long flags; - - /* destroy workqueue */ - if ((work_q=ioc->fc_rescan_work_q)) { - spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); - ioc->fc_rescan_work_q = NULL; - spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); - destroy_workqueue(work_q); - } + MPT_ADAPTER *ioc = pci_get_drvdata(pdev); + struct mptfc_rport_info *p, *n; fc_remove_host(ioc->sh); diff --git a/trunk/drivers/message/fusion/mptsas.c b/trunk/drivers/message/fusion/mptsas.c index af6ec553ff7c..e9716b10acea 100644 --- a/trunk/drivers/message/fusion/mptsas.c +++ b/trunk/drivers/message/fusion/mptsas.c @@ -91,7 +91,6 @@ enum mptsas_hotplug_action { MPTSAS_DEL_DEVICE, MPTSAS_ADD_RAID, MPTSAS_DEL_RAID, - MPTSAS_IGNORE_EVENT, }; struct mptsas_hotplug_event { @@ -299,26 +298,6 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle) return rc; } -/* - * Returns true if there is a scsi end device - */ -static inline int -mptsas_is_end_device(struct mptsas_devinfo * attached) -{ - if ((attached->handle) && - (attached->device_info & - MPI_SAS_DEVICE_INFO_END_DEVICE) && - ((attached->device_info & - MPI_SAS_DEVICE_INFO_SSP_TARGET) | - (attached->device_info & - MPI_SAS_DEVICE_INFO_STP_TARGET) | - (attached->device_info & - MPI_SAS_DEVICE_INFO_SATA_DEVICE))) - return 1; - else - return 0; -} - static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure, u32 form, u32 form_specific) @@ -893,11 +872,7 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info, SasDevicePage0_t *buffer; dma_addr_t dma_handle; __le64 sas_address; - int error=0; - - if (ioc->sas_discovery_runtime && - mptsas_is_end_device(device_info)) - goto out; + int error; hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION; hdr.ExtPageLength = 0; @@ -1034,11 +1009,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, CONFIGPARMS cfg; SasExpanderPage1_t *buffer; dma_addr_t dma_handle; - int error=0; - - if (ioc->sas_discovery_runtime && - mptsas_is_end_device(&phy_info->attached)) - goto out; + int error; hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION; hdr.ExtPageLength = 0; @@ -1097,6 +1068,26 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, return error; } +/* + * Returns true if 
there is a scsi end device + */ +static inline int +mptsas_is_end_device(struct mptsas_devinfo * attached) +{ + if ((attached->handle) && + (attached->device_info & + MPI_SAS_DEVICE_INFO_END_DEVICE) && + ((attached->device_info & + MPI_SAS_DEVICE_INFO_SSP_TARGET) | + (attached->device_info & + MPI_SAS_DEVICE_INFO_STP_TARGET) | + (attached->device_info & + MPI_SAS_DEVICE_INFO_SATA_DEVICE))) + return 1; + else + return 0; +} + static void mptsas_parse_device_info(struct sas_identify *identify, struct mptsas_devinfo *device_info) @@ -1746,9 +1737,6 @@ mptsas_hotplug_work(void *arg) break; case MPTSAS_ADD_DEVICE: - if (ev->phys_disk_num_valid) - mpt_findImVolumes(ioc); - /* * Refresh sas device pg0 data */ @@ -1880,9 +1868,6 @@ mptsas_hotplug_work(void *arg) scsi_device_put(sdev); mpt_findImVolumes(ioc); break; - case MPTSAS_IGNORE_EVENT: - default: - break; } kfree(ev); @@ -1955,8 +1940,7 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc, EVENT_DATA_RAID *raid_event_data) { struct mptsas_hotplug_event *ev; - int status = le32_to_cpu(raid_event_data->SettingsStatus); - int state = (status >> 8) & 0xff; + RAID_VOL0_STATUS * volumeStatus; if (ioc->bus_type != SAS) return; @@ -1971,7 +1955,6 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc, INIT_WORK(&ev->work, mptsas_hotplug_work, ev); ev->ioc = ioc; ev->id = raid_event_data->VolumeID; - ev->event_type = MPTSAS_IGNORE_EVENT; switch (raid_event_data->ReasonCode) { case MPI_EVENT_RAID_RC_PHYSDISK_DELETED: @@ -1983,25 +1966,6 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc, ev->phys_disk_num = raid_event_data->PhysDiskNum; ev->event_type = MPTSAS_DEL_DEVICE; break; - case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED: - switch (state) { - case MPI_PD_STATE_ONLINE: - ioc->raid_data.isRaid = 1; - ev->phys_disk_num_valid = 1; - ev->phys_disk_num = raid_event_data->PhysDiskNum; - ev->event_type = MPTSAS_ADD_DEVICE; - break; - case MPI_PD_STATE_MISSING: - case MPI_PD_STATE_NOT_COMPATIBLE: - case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST: - case MPI_PD_STATE_FAILED_AT_HOST_REQUEST: - case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON: - ev->event_type = MPTSAS_DEL_DEVICE; - break; - default: - break; - } - break; case MPI_EVENT_RAID_RC_VOLUME_DELETED: ev->event_type = MPTSAS_DEL_RAID; break; @@ -2009,18 +1973,11 @@ mptscsih_send_raid_event(MPT_ADAPTER *ioc, ev->event_type = MPTSAS_ADD_RAID; break; case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED: - switch (state) { - case MPI_RAIDVOL0_STATUS_STATE_FAILED: - case MPI_RAIDVOL0_STATUS_STATE_MISSING: - ev->event_type = MPTSAS_DEL_RAID; - break; - case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL: - case MPI_RAIDVOL0_STATUS_STATE_DEGRADED: - ev->event_type = MPTSAS_ADD_RAID; - break; - default: - break; - } + volumeStatus = (RAID_VOL0_STATUS *) & + raid_event_data->SettingsStatus; + ev->event_type = (volumeStatus->State == + MPI_RAIDVOL0_STATUS_STATE_FAILED) ? + MPTSAS_DEL_RAID : MPTSAS_ADD_RAID; break; default: break; diff --git a/trunk/drivers/message/fusion/mptscsih.c b/trunk/drivers/message/fusion/mptscsih.c index 84fa271eb8f4..3729062db317 100644 --- a/trunk/drivers/message/fusion/mptscsih.c +++ b/trunk/drivers/message/fusion/mptscsih.c @@ -632,11 +632,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */ /* Spoof to SCSI Selection Timeout! 
*/ - if (ioc->bus_type != FC) - sc->result = DID_NO_CONNECT << 16; - /* else fibre, just stall until rescan event */ - else - sc->result = DID_REQUEUE << 16; + sc->result = DID_NO_CONNECT << 16; if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF) hd->sel_timeout[pScsiReq->TargetID]++; @@ -881,7 +877,7 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice) struct scsi_cmnd *sc; dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n", - vdevice->vtarget->target_id, vdevice->lun, max)); + vdevice->target_id, vdevice->lun, max)); for (ii=0; ii < max; ii++) { if ((sc = hd->ScsiLookup[ii]) != NULL) { @@ -1649,6 +1645,7 @@ int mptscsih_abort(struct scsi_cmnd * SCpnt) { MPT_SCSI_HOST *hd; + MPT_ADAPTER *ioc; MPT_FRAME_HDR *mf; u32 ctx2abort; int scpnt_idx; @@ -1666,6 +1663,14 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) return FAILED; } + ioc = hd->ioc; + if (hd->resetPending) { + return FAILED; + } + + if (hd->timeouts < -1) + hd->timeouts++; + /* Find this command */ if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) { @@ -1679,13 +1684,6 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) return SUCCESS; } - if (hd->resetPending) { - return FAILED; - } - - if (hd->timeouts < -1) - hd->timeouts++; - printk(KERN_WARNING MYNAM ": %s: attempting task abort! (sc=%p)\n", hd->ioc->name, SCpnt); scsi_print_command(SCpnt); @@ -1705,7 +1703,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) vdev = SCpnt->device->hostdata; retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun, - ctx2abort, mptscsih_get_tm_timeout(hd->ioc)); + ctx2abort, mptscsih_get_tm_timeout(ioc)); printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n", hd->ioc->name, @@ -2523,15 +2521,15 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) /* 7. FC: Rescan for blocked rports which might have returned. 
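/*
 * The MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE case above spoofs a selection
 * timeout by packing DID_NO_CONNECT into the host byte (bits 16-23) of the
 * SCSI result word.  The standalone snippet below only illustrates that
 * encoding; DID_NO_CONNECT's value is taken from the midlayer headers of
 * this era.
 */
#include <stdio.h>

#define DID_NO_CONNECT 0x01

int main(void)
{
        unsigned int result = DID_NO_CONNECT << 16;     /* host byte only */

        printf("result = 0x%08x, host byte = 0x%02x\n",
               result, (result >> 16) & 0xff);
        return 0;
}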
*/ - if (ioc->bus_type == FC) { + else if (ioc->bus_type == FC) { + int work_count; + unsigned long flags; + spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); - if (ioc->fc_rescan_work_q) { - if (ioc->fc_rescan_work_count++ == 0) { - queue_work(ioc->fc_rescan_work_q, - &ioc->fc_rescan_work); - } - } + work_count = ++ioc->fc_rescan_work_count; spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); + if (work_count == 1) + schedule_work(&ioc->fc_rescan_work); } dtmprintk((MYIOC_s_WARN_FMT "Post-Reset complete.\n", ioc->name)); @@ -2546,6 +2544,7 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) { MPT_SCSI_HOST *hd; u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; + int work_count; unsigned long flags; devtverboseprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", @@ -2570,13 +2569,10 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) case MPI_EVENT_RESCAN: /* 06 */ spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags); - if (ioc->fc_rescan_work_q) { - if (ioc->fc_rescan_work_count++ == 0) { - queue_work(ioc->fc_rescan_work_q, - &ioc->fc_rescan_work); - } - } + work_count = ++ioc->fc_rescan_work_count; spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags); + if (work_count == 1) + schedule_work(&ioc->fc_rescan_work); break; /* diff --git a/trunk/drivers/message/fusion/mptspi.c b/trunk/drivers/message/fusion/mptspi.c index f2a4d382ea19..09c745b19cc8 100644 --- a/trunk/drivers/message/fusion/mptspi.c +++ b/trunk/drivers/message/fusion/mptspi.c @@ -783,70 +783,6 @@ static struct pci_device_id mptspi_pci_table[] = { }; MODULE_DEVICE_TABLE(pci, mptspi_pci_table); - -/* - * renegotiate for a given target - */ -static void -mptspi_dv_renegotiate_work(void *data) -{ - struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data; - struct _MPT_SCSI_HOST *hd = wqw->hd; - struct scsi_device *sdev; - - kfree(wqw); - - shost_for_each_device(sdev, hd->ioc->sh) - mptspi_dv_device(hd, sdev); -} - -static void -mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd) -{ - struct work_queue_wrapper *wqw = kmalloc(sizeof(*wqw), GFP_ATOMIC); - - if (!wqw) - return; - - INIT_WORK(&wqw->work, mptspi_dv_renegotiate_work, wqw); - wqw->hd = hd; - - schedule_work(&wqw->work); -} - -/* - * spi module reset handler - */ -static int -mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) -{ - struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)ioc->sh->hostdata; - int rc; - - rc = mptscsih_ioc_reset(ioc, reset_phase); - - if (reset_phase == MPT_IOC_POST_RESET) - mptspi_dv_renegotiate(hd); - - return rc; -} - -/* - * spi module resume handler - */ -static int -mptspi_resume(struct pci_dev *pdev) -{ - MPT_ADAPTER *ioc = pci_get_drvdata(pdev); - struct _MPT_SCSI_HOST *hd = (struct _MPT_SCSI_HOST *)ioc->sh->hostdata; - int rc; - - rc = mptscsih_resume(pdev); - mptspi_dv_renegotiate(hd); - - return rc; -} - /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -1096,7 +1032,7 @@ static struct pci_driver mptspi_driver = { .shutdown = mptscsih_shutdown, #ifdef CONFIG_PM .suspend = mptscsih_suspend, - .resume = mptspi_resume, + .resume = mptscsih_resume, #endif }; @@ -1125,7 +1061,7 @@ mptspi_init(void) ": Registered for IOC event notifications\n")); } - if (mpt_reset_register(mptspiDoneCtx, mptspi_ioc_reset) == 0) { + if (mpt_reset_register(mptspiDoneCtx, mptscsih_ioc_reset) == 0) { dprintk((KERN_INFO MYNAM ": Registered for IOC 
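/*
 * Both versions of the post-reset and MPI_EVENT_RESCAN hunks above coalesce
 * rescan requests: only the transition from zero to one pending request
 * actually schedules the work item, and the counter is sampled under the
 * spinlock while the scheduling call happens outside it.  Condensed sketch
 * using the adapter's own field names; the helper name is made up.
 */
static void my_request_fc_rescan(MPT_ADAPTER *ioc)
{
        unsigned long flags;
        int work_count;

        spin_lock_irqsave(&ioc->fc_rescan_work_lock, flags);
        work_count = ++ioc->fc_rescan_work_count;
        spin_unlock_irqrestore(&ioc->fc_rescan_work_lock, flags);

        if (work_count == 1)                    /* first requester kicks it off */
                schedule_work(&ioc->fc_rescan_work);
}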
reset notifications\n")); } diff --git a/trunk/drivers/net/dl2k.c b/trunk/drivers/net/dl2k.c index 1ddefd281213..1f3627470c95 100644 --- a/trunk/drivers/net/dl2k.c +++ b/trunk/drivers/net/dl2k.c @@ -765,7 +765,7 @@ rio_free_tx (struct net_device *dev, int irq) break; skb = np->tx_skbuff[entry]; pci_unmap_single (np->pdev, - np->tx_ring[entry].fraginfo & DMA_48BIT_MASK, + np->tx_ring[entry].fraginfo & 0xffffffffffff, skb->len, PCI_DMA_TODEVICE); if (irq) dev_kfree_skb_irq (skb); @@ -893,7 +893,7 @@ receive_packet (struct net_device *dev) /* Small skbuffs for short packets */ if (pkt_len > copy_thresh) { pci_unmap_single (np->pdev, - desc->fraginfo & DMA_48BIT_MASK, + desc->fraginfo & 0xffffffffffff, np->rx_buf_sz, PCI_DMA_FROMDEVICE); skb_put (skb = np->rx_skbuff[entry], pkt_len); @@ -901,7 +901,7 @@ receive_packet (struct net_device *dev) } else if ((skb = dev_alloc_skb (pkt_len + 2)) != NULL) { pci_dma_sync_single_for_cpu(np->pdev, desc->fraginfo & - DMA_48BIT_MASK, + 0xffffffffffff, np->rx_buf_sz, PCI_DMA_FROMDEVICE); skb->dev = dev; @@ -913,7 +913,7 @@ receive_packet (struct net_device *dev) skb_put (skb, pkt_len); pci_dma_sync_single_for_device(np->pdev, desc->fraginfo & - DMA_48BIT_MASK, + 0xffffffffffff, np->rx_buf_sz, PCI_DMA_FROMDEVICE); } @@ -1800,7 +1800,7 @@ rio_close (struct net_device *dev) skb = np->rx_skbuff[i]; if (skb) { pci_unmap_single(np->pdev, - np->rx_ring[i].fraginfo & DMA_48BIT_MASK, + np->rx_ring[i].fraginfo & 0xffffffffffff, skb->len, PCI_DMA_FROMDEVICE); dev_kfree_skb (skb); np->rx_skbuff[i] = NULL; @@ -1810,7 +1810,7 @@ rio_close (struct net_device *dev) skb = np->tx_skbuff[i]; if (skb) { pci_unmap_single(np->pdev, - np->tx_ring[i].fraginfo & DMA_48BIT_MASK, + np->tx_ring[i].fraginfo & 0xffffffffffff, skb->len, PCI_DMA_TODEVICE); dev_kfree_skb (skb); np->tx_skbuff[i] = NULL; diff --git a/trunk/drivers/net/phy/mdio_bus.c b/trunk/drivers/net/phy/mdio_bus.c index 1b236bdf6b92..459443b572ce 100644 --- a/trunk/drivers/net/phy/mdio_bus.c +++ b/trunk/drivers/net/phy/mdio_bus.c @@ -60,10 +60,8 @@ int mdiobus_register(struct mii_bus *bus) for (i = 0; i < PHY_MAX_ADDR; i++) { struct phy_device *phydev; - if (bus->phy_mask & (1 << i)) { - bus->phy_map[i] = NULL; + if (bus->phy_mask & (1 << i)) continue; - } phydev = get_phy_device(bus, i); diff --git a/trunk/drivers/net/sis900.c b/trunk/drivers/net/sis900.c index f5a3bf4d959a..b82191d2bee1 100644 --- a/trunk/drivers/net/sis900.c +++ b/trunk/drivers/net/sis900.c @@ -127,7 +127,6 @@ static const struct mii_chip_info { } mii_chip_table[] = { { "SiS 900 Internal MII PHY", 0x001d, 0x8000, LAN }, { "SiS 7014 Physical Layer Solution", 0x0016, 0xf830, LAN }, - { "SiS 900 on Foxconn 661 7MI", 0x0143, 0xBC70, LAN }, { "Altimata AC101LF PHY", 0x0022, 0x5520, LAN }, { "ADM 7001 LAN PHY", 0x002e, 0xcc60, LAN }, { "AMD 79C901 10BASE-T PHY", 0x0000, 0x6B70, LAN }, diff --git a/trunk/drivers/net/sky2.c b/trunk/drivers/net/sky2.c index ffd267fab21d..60cdfcabe1fd 100644 --- a/trunk/drivers/net/sky2.c +++ b/trunk/drivers/net/sky2.c @@ -128,7 +128,6 @@ MODULE_DEVICE_TABLE(pci, sky2_id_table); /* Avoid conditionals by using array */ static const unsigned txqaddr[] = { Q_XA1, Q_XA2 }; static const unsigned rxqaddr[] = { Q_R1, Q_R2 }; -static const u32 portirq_msk[] = { Y2_IS_PORT_1, Y2_IS_PORT_2 }; /* This driver supports yukon2 chipset only */ static const char *yukon2_name[] = { @@ -1085,7 +1084,7 @@ static int sky2_up(struct net_device *dev) /* Enable interrupts from phy/mac for port */ imask = sky2_read32(hw, B0_IMSK); - imask |= 
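/*
 * The dl2k.c hunks above replace the DMA_48BIT_MASK symbol with the literal
 * 0xffffffffffff.  Both are the same 48-bit mask; the standalone program
 * below (illustration only, not driver code) shows the equivalence on a
 * sample descriptor word whose upper bits carry non-address flags.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define MY_DMA_48BIT_MASK 0x0000ffffffffffffULL

int main(void)
{
        uint64_t fraginfo = 0xabcd123456789abcULL;
        uint64_t bus_addr = fraginfo & MY_DMA_48BIT_MASK;

        printf("masked   : 0x%" PRIx64 "\n", bus_addr);
        printf("identical: %d\n", bus_addr == (fraginfo & 0xffffffffffffULL));
        return 0;
}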
portirq_msk[port]; + imask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; sky2_write32(hw, B0_IMSK, imask); return 0; @@ -1436,7 +1435,7 @@ static int sky2_down(struct net_device *dev) /* Disable port IRQ */ imask = sky2_read32(hw, B0_IMSK); - imask &= ~portirq_msk[port]; + imask &= ~(sky2->port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; sky2_write32(hw, B0_IMSK, imask); /* turn off LED's */ diff --git a/trunk/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/trunk/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c index 0c9c2f400bf6..cb30d9c1153d 100644 --- a/trunk/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c +++ b/trunk/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c @@ -219,7 +219,6 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ahc->flags |= AHC_39BIT_ADDRESSING; } else { if (dma_set_mask(dev, DMA_32BIT_MASK)) { - ahc_free(ahc); printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n"); return (-ENODEV); } diff --git a/trunk/drivers/scsi/aic7xxx/aic7xxx_pci.c b/trunk/drivers/scsi/aic7xxx/aic7xxx_pci.c index 3adecef21783..5f586140e057 100644 --- a/trunk/drivers/scsi/aic7xxx/aic7xxx_pci.c +++ b/trunk/drivers/scsi/aic7xxx/aic7xxx_pci.c @@ -2036,12 +2036,12 @@ ahc_pci_resume(struct ahc_softc *ahc) * that the OS doesn't know about and rely on our chip * reset handler to handle the rest. */ - ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, - ahc->bus_softc.pci_softc.devconfig, /*bytes*/4); - ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, - ahc->bus_softc.pci_softc.command, /*bytes*/1); - ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, - ahc->bus_softc.pci_softc.csize_lattime, /*bytes*/1); + ahc_pci_write_config(ahc->dev_softc, DEVCONFIG, /*bytes*/4, + ahc->bus_softc.pci_softc.devconfig); + ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, /*bytes*/1, + ahc->bus_softc.pci_softc.command); + ahc_pci_write_config(ahc->dev_softc, CSIZE_LATTIME, /*bytes*/1, + ahc->bus_softc.pci_softc.csize_lattime); if ((ahc->flags & AHC_HAS_TERM_LOGIC) != 0) { struct seeprom_descriptor sd; u_int sxfrctl1; diff --git a/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c b/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c index 2e9be83a697f..0a8ad37ae899 100644 --- a/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -739,8 +739,7 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) { struct viosrp_adapter_info *req; struct srp_event_struct *evt_struct; - dma_addr_t addr; - + evt_struct = get_event_struct(&hostdata->pool); if (!evt_struct) { printk(KERN_ERR "ibmvscsi: couldn't allocate an event " @@ -758,10 +757,10 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) req->common.type = VIOSRP_ADAPTER_INFO_TYPE; req->common.length = sizeof(hostdata->madapter_info); - req->buffer = addr = dma_map_single(hostdata->dev, - &hostdata->madapter_info, - sizeof(hostdata->madapter_info), - DMA_BIDIRECTIONAL); + req->buffer = dma_map_single(hostdata->dev, + &hostdata->madapter_info, + sizeof(hostdata->madapter_info), + DMA_BIDIRECTIONAL); if (dma_mapping_error(req->buffer)) { printk(KERN_ERR @@ -771,13 +770,8 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) return; } - if (ibmvscsi_send_srp_event(evt_struct, hostdata)) { + if (ibmvscsi_send_srp_event(evt_struct, hostdata)) printk(KERN_ERR "ibmvscsi: couldn't send ADAPTER_INFO_REQ!\n"); - dma_unmap_single(hostdata->dev, - addr, - sizeof(hostdata->madapter_info), - DMA_BIDIRECTIONAL); - } }; /** @@ -1265,7 +1259,6 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, { struct 
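/*
 * Caution about the sky2_down() line above:
 *     imask &= ~(sky2->port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
 * Because '~' binds tighter than '?:', the complement is applied to the
 * comparison (always non-zero), so Y2_IS_PORT_1 is always selected and the
 * mask is never inverted.  The standalone snippet below, with illustrative
 * values, contrasts that with the intended ~((port == 0) ? A : B) form.
 */
#include <stdio.h>

#define Y2_IS_PORT_1 0x01u
#define Y2_IS_PORT_2 0x02u

int main(void)
{
        unsigned int port = 0, as_written = 0xffu, intended = 0xffu;

        as_written &= ~(port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
        intended   &= ~((port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2);

        printf("as written: 0x%02x   intended: 0x%02x\n", as_written, intended);
        return 0;
}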
viosrp_host_config *host_config; struct srp_event_struct *evt_struct; - dma_addr_t addr; int rc; evt_struct = get_event_struct(&hostdata->pool); @@ -1286,9 +1279,8 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, memset(host_config, 0x00, sizeof(*host_config)); host_config->common.type = VIOSRP_HOST_CONFIG_TYPE; host_config->common.length = length; - host_config->buffer = addr = dma_map_single(hostdata->dev, buffer, - length, - DMA_BIDIRECTIONAL); + host_config->buffer = dma_map_single(hostdata->dev, buffer, length, + DMA_BIDIRECTIONAL); if (dma_mapping_error(host_config->buffer)) { printk(KERN_ERR @@ -1299,9 +1291,11 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, init_completion(&evt_struct->comp); rc = ibmvscsi_send_srp_event(evt_struct, hostdata); - if (rc == 0) + if (rc == 0) { wait_for_completion(&evt_struct->comp); - dma_unmap_single(hostdata->dev, addr, length, DMA_BIDIRECTIONAL); + dma_unmap_single(hostdata->dev, host_config->buffer, + length, DMA_BIDIRECTIONAL); + } return rc; } diff --git a/trunk/drivers/scsi/lpfc/lpfc_crtn.h b/trunk/drivers/scsi/lpfc/lpfc_crtn.h index ee22173fce43..fad607b2e6f4 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_crtn.h +++ b/trunk/drivers/scsi/lpfc/lpfc_crtn.h @@ -27,6 +27,7 @@ void lpfc_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); +void lpfc_set_slim(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); int lpfc_reg_login(struct lpfc_hba *, uint32_t, uint8_t *, LPFC_MBOXQ_t *, uint32_t); void lpfc_unreg_login(struct lpfc_hba *, uint32_t, LPFC_MBOXQ_t *); diff --git a/trunk/drivers/scsi/lpfc/lpfc_disc.h b/trunk/drivers/scsi/lpfc/lpfc_disc.h index 41cf5d3ea6ce..8932b1be2b60 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_disc.h +++ b/trunk/drivers/scsi/lpfc/lpfc_disc.h @@ -113,7 +113,6 @@ struct lpfc_nodelist { #define NLP_NPR_ADISC 0x2000000 /* Issue ADISC when dq'ed from NPR list */ #define NLP_DELAY_REMOVE 0x4000000 /* Defer removal till end of DSM */ -#define NLP_NODEV_REMOVE 0x8000000 /* Defer removal till discovery ends */ /* Defines for list searchs */ #define NLP_SEARCH_MAPPED 0x1 /* search mapped */ diff --git a/trunk/drivers/scsi/lpfc/lpfc_els.c b/trunk/drivers/scsi/lpfc/lpfc_els.c index 283b7d824c34..4813beaaca8f 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_els.c +++ b/trunk/drivers/scsi/lpfc/lpfc_els.c @@ -302,6 +302,10 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, if (lpfc_reg_login(phba, Fabric_DID, (uint8_t *) sp, mbox, 0)) goto fail_free_mbox; + /* + * set_slim mailbox command needs to execute first, + * queue this command to be processed later. + */ mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login; mbox->context2 = ndlp; @@ -777,26 +781,25 @@ lpfc_cmpl_els_plogi(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, if (disc && phba->num_disc_nodes) { /* Check to see if there are more PLOGIs to be sent */ lpfc_more_plogi(phba); + } - if (phba->num_disc_nodes == 0) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_NDISC_ACTIVE; - spin_unlock_irq(phba->host->host_lock); + if (phba->num_disc_nodes == 0) { + spin_lock_irq(phba->host->host_lock); + phba->fc_flag &= ~FC_NDISC_ACTIVE; + spin_unlock_irq(phba->host->host_lock); - lpfc_can_disctmo(phba); - if (phba->fc_flag & FC_RSCN_MODE) { - /* - * Check to see if more RSCNs came in while - * we were processing this one. 
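/*
 * Sketch of the mapping-lifetime pattern the ibmvscsi hunks above touch:
 * keep the dma_addr_t returned by dma_map_single() in a local so the error
 * path unmaps exactly what was mapped, rather than re-reading a request
 * field the device may already own.  "my_request" and my_submit() are
 * hypothetical; dma_mapping_error() is shown in the one-argument form this
 * kernel era used.
 */
struct my_request {
        dma_addr_t buffer;              /* hypothetical request descriptor */
};

int my_submit(struct my_request *req);  /* hypothetical: hands req to the HBA */

static int my_send_info(struct device *dev, void *info, size_t len,
                        struct my_request *req)
{
        dma_addr_t addr;

        addr = dma_map_single(dev, info, len, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(addr))
                return -ENOMEM;

        req->buffer = addr;
        if (my_submit(req)) {
                /* failure: undo exactly the mapping made above */
                dma_unmap_single(dev, addr, len, DMA_BIDIRECTIONAL);
                return -EIO;
        }
        return 0;                       /* success: completion path unmaps later */
}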
- */ - if ((phba->fc_rscn_id_cnt == 0) && - (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { - spin_lock_irq(phba->host->host_lock); - phba->fc_flag &= ~FC_RSCN_MODE; - spin_unlock_irq(phba->host->host_lock); - } else { - lpfc_els_handle_rscn(phba); - } + lpfc_can_disctmo(phba); + if (phba->fc_flag & FC_RSCN_MODE) { + /* Check to see if more RSCNs came in while we were + * processing this one. + */ + if ((phba->fc_rscn_id_cnt == 0) && + (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { + spin_lock_irq(phba->host->host_lock); + phba->fc_flag &= ~FC_RSCN_MODE; + spin_unlock_irq(phba->host->host_lock); + } else { + lpfc_els_handle_rscn(phba); } } } @@ -1260,7 +1263,7 @@ lpfc_issue_els_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; - cmdsize = (2 * sizeof (uint32_t)) + sizeof (struct lpfc_name); + cmdsize = 2 * (sizeof (uint32_t) + sizeof (struct lpfc_name)); elsiocb = lpfc_prep_els_iocb(phba, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_LOGO); if (!elsiocb) @@ -1448,23 +1451,22 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_hba *phba, struct lpfc_nodelist * nlp) * PLOGIs to be sent */ lpfc_more_plogi(phba); + } - if (phba->num_disc_nodes == 0) { - phba->fc_flag &= ~FC_NDISC_ACTIVE; - lpfc_can_disctmo(phba); - if (phba->fc_flag & FC_RSCN_MODE) { - /* - * Check to see if more RSCNs - * came in while we were - * processing this one. - */ - if((phba->fc_rscn_id_cnt==0) && - !(phba->fc_flag & FC_RSCN_DISCOVERY)) { - phba->fc_flag &= ~FC_RSCN_MODE; - } - else { - lpfc_els_handle_rscn(phba); - } + if (phba->num_disc_nodes == 0) { + phba->fc_flag &= ~FC_NDISC_ACTIVE; + lpfc_can_disctmo(phba); + if (phba->fc_flag & FC_RSCN_MODE) { + /* Check to see if more RSCNs + * came in while we were + * processing this one. + */ + if((phba->fc_rscn_id_cnt==0) && + (!(phba->fc_flag & FC_RSCN_DISCOVERY))) { + phba->fc_flag &= ~FC_RSCN_MODE; + } + else { + lpfc_els_handle_rscn(phba); } } } @@ -1870,6 +1872,9 @@ lpfc_cmpl_els_acc(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb, if (mbox) { if ((rspiocb->iocb.ulpStatus == 0) && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { + /* set_slim mailbox command needs to execute first, + * queue this command to be processed later. + */ lpfc_unreg_rpi(phba, ndlp); mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; mbox->context2 = ndlp; @@ -1915,7 +1920,6 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag, uint8_t *pcmd; uint16_t cmdsize; int rc; - ELS_PKT *els_pkt_ptr; psli = &phba->sli; pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ @@ -1954,23 +1958,6 @@ lpfc_els_rsp_acc(struct lpfc_hba * phba, uint32_t flag, pcmd += sizeof (uint32_t); memcpy(pcmd, &phba->fc_sparam, sizeof (struct serv_parm)); break; - case ELS_CMD_PRLO: - cmdsize = sizeof (uint32_t) + sizeof (PRLO); - elsiocb = lpfc_prep_els_iocb(phba, 0, cmdsize, oldiocb->retry, - ndlp, ndlp->nlp_DID, ELS_CMD_PRLO); - if (!elsiocb) - return 1; - - icmd = &elsiocb->iocb; - icmd->ulpContext = oldcmd->ulpContext; /* Xri */ - pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt); - - memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt, - sizeof (uint32_t) + sizeof (PRLO)); - *((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC; - els_pkt_ptr = (ELS_PKT *) pcmd; - els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; - break; default: return 1; } @@ -2511,7 +2498,7 @@ lpfc_els_rcv_rscn(struct lpfc_hba * phba, /* If we are about to begin discovery, just ACC the RSCN. * Discovery processing will satisfy it. 
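/*
 * The lpfc_els.c hunks above mostly re-indent one decision: once the last
 * outstanding discovery PLOGI completes, discovery is marked finished and
 * any RSCNs that arrived in the meantime are either dropped or replayed.
 * Condensed sketch of that flow using the driver's own names; the helper
 * itself is illustrative, not a function the driver defines.
 */
static void my_discovery_step_done(struct lpfc_hba *phba)
{
        if (phba->num_disc_nodes)
                return;                         /* nodes still being discovered */

        spin_lock_irq(phba->host->host_lock);
        phba->fc_flag &= ~FC_NDISC_ACTIVE;      /* discovery finished */
        spin_unlock_irq(phba->host->host_lock);

        lpfc_can_disctmo(phba);
        if (!(phba->fc_flag & FC_RSCN_MODE))
                return;

        if ((phba->fc_rscn_id_cnt == 0) &&
            !(phba->fc_flag & FC_RSCN_DISCOVERY)) {
                spin_lock_irq(phba->host->host_lock);
                phba->fc_flag &= ~FC_RSCN_MODE; /* nothing new arrived */
                spin_unlock_irq(phba->host->host_lock);
        } else {
                lpfc_els_handle_rscn(phba);     /* replay the queued RSCNs */
        }
}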
*/ - if (phba->hba_state <= LPFC_NS_QRY) { + if (phba->hba_state < LPFC_NS_QRY) { lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, newnode); return 0; diff --git a/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c b/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c index adb086009ae0..6721e679df62 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -311,8 +311,8 @@ lpfc_workq_post_event(struct lpfc_hba * phba, void *arg1, void *arg2, evtp->evt_arg2 = arg2; evtp->evt = evt; - spin_lock_irq(phba->host->host_lock); list_add_tail(&evtp->evt_listp, &phba->work_list); + spin_lock_irq(phba->host->host_lock); if (phba->work_wait) wake_up(phba->work_wait); spin_unlock_irq(phba->host->host_lock); @@ -1071,6 +1071,10 @@ lpfc_register_remote_port(struct lpfc_hba * phba, /* initialize static port data */ rport->maxframe_size = ndlp->nlp_maxframe; rport->supported_classes = ndlp->nlp_class_sup; + if ((rport->scsi_target_id != -1) && + (rport->scsi_target_id < MAX_FCP_TARGET)) { + ndlp->nlp_sid = rport->scsi_target_id; + } rdata = rport->dd_data; rdata->pnode = ndlp; @@ -1083,10 +1087,6 @@ lpfc_register_remote_port(struct lpfc_hba * phba, if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) fc_remote_port_rolechg(rport, rport_ids.roles); - if ((rport->scsi_target_id != -1) && - (rport->scsi_target_id < MAX_FCP_TARGET)) { - ndlp->nlp_sid = rport->scsi_target_id; - } return; } @@ -1238,7 +1238,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list) evt_listp); } - nlp->nlp_flag &= ~NLP_NODEV_REMOVE; nlp->nlp_type |= NLP_FC_NODE; break; case NLP_MAPPED_LIST: @@ -1259,7 +1258,6 @@ lpfc_nlp_list(struct lpfc_hba * phba, struct lpfc_nodelist * nlp, int list) evt_listp); } - nlp->nlp_flag &= ~NLP_NODEV_REMOVE; break; case NLP_NPR_LIST: nlp->nlp_flag |= list; @@ -1404,8 +1402,6 @@ lpfc_check_sli_ndlp(struct lpfc_hba * phba, if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) return 1; case CMD_ELS_REQUEST64_CR: - if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID) - return 1; case CMD_XMIT_ELS_RSP64_CX: if (iocb->context1 == (uint8_t *) ndlp) return 1; @@ -1905,8 +1901,10 @@ lpfc_setup_disc_node(struct lpfc_hba * phba, uint32_t did) */ if (ndlp->nlp_flag & NLP_DELAY_TMO) lpfc_cancel_retry_delay_tmo(phba, ndlp); - } else + } else { + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; ndlp = NULL; + } } else { flg = ndlp->nlp_flag & NLP_LIST_MASK; if ((flg == NLP_ADISC_LIST) || (flg == NLP_PLOGI_LIST)) diff --git a/trunk/drivers/scsi/lpfc/lpfc_hw.h b/trunk/drivers/scsi/lpfc/lpfc_hw.h index eedf98801366..54d04188f7cc 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_hw.h +++ b/trunk/drivers/scsi/lpfc/lpfc_hw.h @@ -449,7 +449,6 @@ struct serv_parm { /* Structure is in Big Endian format */ #define ELS_CMD_RRQ 0x12000000 #define ELS_CMD_PRLI 0x20100014 #define ELS_CMD_PRLO 0x21100014 -#define ELS_CMD_PRLO_ACC 0x02100014 #define ELS_CMD_PDISC 0x50000000 #define ELS_CMD_FDISC 0x51000000 #define ELS_CMD_ADISC 0x52000000 @@ -485,7 +484,6 @@ struct serv_parm { /* Structure is in Big Endian format */ #define ELS_CMD_RRQ 0x12 #define ELS_CMD_PRLI 0x14001020 #define ELS_CMD_PRLO 0x14001021 -#define ELS_CMD_PRLO_ACC 0x14001002 #define ELS_CMD_PDISC 0x50 #define ELS_CMD_FDISC 0x51 #define ELS_CMD_ADISC 0x52 @@ -1541,7 +1539,6 @@ typedef struct { #define FLAGS_TOPOLOGY_FAILOVER 0x0400 /* Bit 10 */ #define FLAGS_LINK_SPEED 0x0800 /* Bit 11 */ -#define FLAGS_IMED_ABORT 0x04000 /* Bit 14 */ uint32_t link_speed; #define LINK_SPEED_AUTO 0 /* Auto selection */ diff --git 
a/trunk/drivers/scsi/lpfc/lpfc_init.c b/trunk/drivers/scsi/lpfc/lpfc_init.c index 908d0f27706f..66d5d003555d 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_init.c +++ b/trunk/drivers/scsi/lpfc/lpfc_init.c @@ -294,6 +294,15 @@ lpfc_config_port_post(struct lpfc_hba * phba) } } + /* This should turn on DELAYED ABTS for ELS timeouts */ + lpfc_set_slim(phba, pmb, 0x052198, 0x1); + if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { + phba->hba_state = LPFC_HBA_ERROR; + mempool_free( pmb, phba->mbox_mem_pool); + return -EIO; + } + + lpfc_read_config(phba, pmb); if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) { lpfc_printf_log(phba, @@ -795,7 +804,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba * phba, uint8_t * mdp, uint8_t * descp) int max_speed; char * ports; char * bus; - } m = {"", 0, "", ""}; + } m; pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype); ports = (hdrtype == 0x80) ? "2-port " : ""; @@ -1618,7 +1627,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) error = lpfc_alloc_sysfs_attr(phba); if (error) - goto out_remove_host; + goto out_kthread_stop; error = request_irq(phba->pcidev->irq, lpfc_intr_handler, SA_SHIRQ, LPFC_DRIVER_NAME, phba); @@ -1635,10 +1644,8 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; error = lpfc_sli_hba_setup(phba); - if (error) { - error = -ENODEV; + if (error) goto out_free_irq; - } if (phba->cfg_poll & DISABLE_FCP_RING_INT) { spin_lock_irq(phba->host->host_lock); @@ -1693,9 +1700,6 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) free_irq(phba->pcidev->irq, phba); out_free_sysfs_attr: lpfc_free_sysfs_attr(phba); -out_remove_host: - fc_remove_host(phba->host); - scsi_remove_host(phba->host); out_kthread_stop: kthread_stop(phba->worker_thread); out_free_iocbq: @@ -1717,14 +1721,12 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) out_idr_remove: idr_remove(&lpfc_hba_index, phba->brd_no); out_put_host: - phba->host = NULL; scsi_host_put(host); out_release_regions: pci_release_regions(pdev); out_disable_device: pci_disable_device(pdev); out: - pci_set_drvdata(pdev, NULL); return error; } diff --git a/trunk/drivers/scsi/lpfc/lpfc_mbox.c b/trunk/drivers/scsi/lpfc/lpfc_mbox.c index e42f22aaf71b..c585e2b2e589 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_mbox.c +++ b/trunk/drivers/scsi/lpfc/lpfc_mbox.c @@ -200,9 +200,6 @@ lpfc_init_link(struct lpfc_hba * phba, break; } - /* Enable asynchronous ABTS responses from firmware */ - mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT; - /* NEW_FEATURE * Setting up the link speed */ @@ -295,6 +292,36 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint32_t did, LPFC_MBOXQ_t * pmb) return; } +/***********************************************/ + +/* command to write slim */ +/***********************************************/ +void +lpfc_set_slim(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint32_t addr, + uint32_t value) +{ + MAILBOX_t *mb; + + mb = &pmb->mb; + memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); + + /* addr = 0x090597 is AUTO ABTS disable for ELS commands */ + /* addr = 0x052198 is DELAYED ABTS enable for ELS commands */ + + /* + * Always turn on DELAYED ABTS for ELS timeouts + */ + if ((addr == 0x052198) && (value == 0)) + value = 1; + + mb->un.varWords[0] = addr; + mb->un.varWords[1] = value; + + mb->mbxCommand = MBX_SET_SLIM; + mb->mbxOwner = OWN_HOST; + return; +} + /**********************************************/ /* lpfc_read_nv Issue 
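/*
 * General C note prompted by the "} m;" change in lpfc_get_hba_model_desc()
 * above: an automatic aggregate that is never initialised has indeterminate
 * contents until every member is assigned, so a brace initialiser gives the
 * fall-through case defined (empty) values.  Standalone illustration only;
 * the member names loosely mirror the driver's anonymous struct.
 */
#include <stdio.h>

struct model_desc {
        char *name;
        int   max_speed;
        char *ports;
        char *bus;
};

int main(void)
{
        struct model_desc m = { "", 0, "", "" };   /* defined even if no case matches */

        printf("\"%s\" %d \"%s\" \"%s\"\n", m.name, m.max_speed, m.ports, m.bus);
        return 0;
}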
a READ CONFIG */ /* mailbox command */ diff --git a/trunk/drivers/scsi/lpfc/lpfc_nportdisc.c b/trunk/drivers/scsi/lpfc/lpfc_nportdisc.c index 27d60ad897cd..3d77bd999b70 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/trunk/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -465,18 +465,14 @@ lpfc_rcv_padisc(struct lpfc_hba * phba, static int lpfc_rcv_logo(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, - struct lpfc_iocbq *cmdiocb, - uint32_t els_cmd) + struct lpfc_iocbq *cmdiocb) { /* Put ndlp on NPR list with 1 sec timeout for plogi, ACC logo */ /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary * PLOGIs during LOGO storms from a device. */ ndlp->nlp_flag |= NLP_LOGO_ACC; - if (els_cmd == ELS_CMD_PRLO) - lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); - else - lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); if (!(ndlp->nlp_type & NLP_FABRIC) || (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { @@ -685,7 +681,7 @@ lpfc_rcv_logo_plogi_issue(struct lpfc_hba * phba, /* software abort outstanding PLOGI */ lpfc_els_abort(phba, ndlp, 1); - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); + lpfc_rcv_logo(phba, ndlp, cmdiocb); return ndlp->nlp_state; } @@ -792,6 +788,10 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_hba * phba, if (lpfc_reg_login (phba, irsp->un.elsreq64.remoteID, (uint8_t *) sp, mbox, 0) == 0) { + /* set_slim mailbox command needs to + * execute first, queue this command to + * be processed later. + */ switch (ndlp->nlp_DID) { case NameServer_DID: mbox->mbox_cmpl = @@ -832,17 +832,11 @@ static uint32_t lpfc_device_rm_plogi_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { - if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - return ndlp->nlp_state; - } - else { - /* software abort outstanding PLOGI */ - lpfc_els_abort(phba, ndlp, 1); + /* software abort outstanding PLOGI */ + lpfc_els_abort(phba, ndlp, 1); - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - return NLP_STE_FREED_NODE; - } + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; } static uint32_t @@ -857,7 +851,7 @@ lpfc_device_recov_plogi_issue(struct lpfc_hba * phba, ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; spin_unlock_irq(phba->host->host_lock); return ndlp->nlp_state; @@ -911,7 +905,7 @@ lpfc_rcv_logo_adisc_issue(struct lpfc_hba * phba, /* software abort outstanding ADISC */ lpfc_els_abort(phba, ndlp, 0); - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); + lpfc_rcv_logo(phba, ndlp, cmdiocb); return ndlp->nlp_state; } @@ -938,7 +932,7 @@ lpfc_rcv_prlo_adisc_issue(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; /* Treat like rcv logo */ - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO); + lpfc_rcv_logo(phba, ndlp, cmdiocb); return ndlp->nlp_state; } @@ -993,17 +987,11 @@ lpfc_device_rm_adisc_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { - if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - return ndlp->nlp_state; - } - else { - /* software abort outstanding ADISC */ - lpfc_els_abort(phba, ndlp, 1); + /* software abort outstanding ADISC */ + lpfc_els_abort(phba, ndlp, 1); - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - return NLP_STE_FREED_NODE; - } + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + 
return NLP_STE_FREED_NODE; } static uint32_t @@ -1018,7 +1006,7 @@ lpfc_device_recov_adisc_issue(struct lpfc_hba * phba, ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; ndlp->nlp_flag |= NLP_NPR_ADISC; spin_unlock_irq(phba->host->host_lock); @@ -1060,7 +1048,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); + lpfc_rcv_logo(phba, ndlp, cmdiocb); return ndlp->nlp_state; } @@ -1085,7 +1073,7 @@ lpfc_rcv_prlo_reglogin_issue(struct lpfc_hba * phba, struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); return ndlp->nlp_state; } @@ -1145,14 +1133,8 @@ lpfc_device_rm_reglogin_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { - if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - return ndlp->nlp_state; - } - else { - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - return NLP_STE_FREED_NODE; - } + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; } static uint32_t @@ -1164,7 +1146,7 @@ lpfc_device_recov_reglogin_issue(struct lpfc_hba * phba, ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; spin_unlock_irq(phba->host->host_lock); return ndlp->nlp_state; } @@ -1204,7 +1186,7 @@ lpfc_rcv_logo_prli_issue(struct lpfc_hba * phba, /* Software abort outstanding PRLI before sending acc */ lpfc_els_abort(phba, ndlp, 1); - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); + lpfc_rcv_logo(phba, ndlp, cmdiocb); return ndlp->nlp_state; } @@ -1232,7 +1214,7 @@ lpfc_rcv_prlo_prli_issue(struct lpfc_hba * phba, struct lpfc_iocbq *cmdiocb; cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, ndlp, NULL, 0); + lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); return ndlp->nlp_state; } @@ -1296,17 +1278,11 @@ static uint32_t lpfc_device_rm_prli_issue(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { - if(ndlp->nlp_flag & NLP_NPR_2B_DISC) { - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - return ndlp->nlp_state; - } - else { - /* software abort outstanding PLOGI */ - lpfc_els_abort(phba, ndlp, 1); + /* software abort outstanding PRLI */ + lpfc_els_abort(phba, ndlp, 1); - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - return NLP_STE_FREED_NODE; - } + lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); + return NLP_STE_FREED_NODE; } @@ -1337,7 +1313,7 @@ lpfc_device_recov_prli_issue(struct lpfc_hba * phba, ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; spin_unlock_irq(phba->host->host_lock); return ndlp->nlp_state; } @@ -1375,7 +1351,7 @@ lpfc_rcv_logo_unmap_node(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); + lpfc_rcv_logo(phba, ndlp, cmdiocb); return ndlp->nlp_state; } @@ -1399,7 +1375,7 @@ lpfc_rcv_prlo_unmap_node(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_els_rsp_acc(phba, ELS_CMD_PRLO, cmdiocb, 
ndlp, NULL, 0); + lpfc_els_rsp_acc(phba, ELS_CMD_ACC, cmdiocb, ndlp, NULL, 0); return ndlp->nlp_state; } @@ -1410,7 +1386,7 @@ lpfc_device_recov_unmap_node(struct lpfc_hba * phba, ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; lpfc_disc_set_adisc(phba, ndlp); return ndlp->nlp_state; @@ -1448,7 +1424,7 @@ lpfc_rcv_logo_mapped_node(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); + lpfc_rcv_logo(phba, ndlp, cmdiocb); return ndlp->nlp_state; } @@ -1480,7 +1456,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_hba * phba, spin_unlock_irq(phba->host->host_lock); /* Treat like rcv logo */ - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_PRLO); + lpfc_rcv_logo(phba, ndlp, cmdiocb); return ndlp->nlp_state; } @@ -1493,7 +1469,7 @@ lpfc_device_recov_mapped_node(struct lpfc_hba * phba, ndlp->nlp_state = NLP_STE_NPR_NODE; lpfc_nlp_list(phba, ndlp, NLP_NPR_LIST); spin_lock_irq(phba->host->host_lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; spin_unlock_irq(phba->host->host_lock); lpfc_disc_set_adisc(phba, ndlp); return ndlp->nlp_state; @@ -1575,7 +1551,7 @@ lpfc_rcv_logo_npr_node(struct lpfc_hba * phba, cmdiocb = (struct lpfc_iocbq *) arg; - lpfc_rcv_logo(phba, ndlp, cmdiocb, ELS_CMD_LOGO); + lpfc_rcv_logo(phba, ndlp, cmdiocb); return ndlp->nlp_state; } @@ -1641,16 +1617,9 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb, *rspiocb; - IOCB_t *irsp; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; - - irsp = &rspiocb->iocb; - if (irsp->ulpStatus) { - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - return NLP_STE_FREED_NODE; - } return ndlp->nlp_state; } @@ -1659,16 +1628,9 @@ lpfc_cmpl_prli_npr_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { struct lpfc_iocbq *cmdiocb, *rspiocb; - IOCB_t *irsp; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; - - irsp = &rspiocb->iocb; - if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - return NLP_STE_FREED_NODE; - } return ndlp->nlp_state; } @@ -1687,16 +1649,9 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_hba * phba, uint32_t evt) { struct lpfc_iocbq *cmdiocb, *rspiocb; - IOCB_t *irsp; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; - - irsp = &rspiocb->iocb; - if (irsp->ulpStatus && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - return NLP_STE_FREED_NODE; - } return ndlp->nlp_state; } @@ -1713,12 +1668,7 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_hba * phba, if (!mb->mbxStatus) ndlp->nlp_rpi = mb->un.varWords[0]; - else { - if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { - lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); - return NLP_STE_FREED_NODE; - } - } + return ndlp->nlp_state; } @@ -1727,10 +1677,6 @@ lpfc_device_rm_npr_node(struct lpfc_hba * phba, struct lpfc_nodelist * ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - return ndlp->nlp_state; - } lpfc_nlp_list(phba, ndlp, NLP_NO_LIST); return NLP_STE_FREED_NODE; } @@ -1741,7 +1687,7 @@ lpfc_device_recov_npr_node(struct lpfc_hba * phba, uint32_t evt) { spin_lock_irq(phba->host->host_lock); - 
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); + ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; spin_unlock_irq(phba->host->host_lock); if (ndlp->nlp_flag & NLP_DELAY_TMO) { lpfc_cancel_retry_delay_tmo(phba, ndlp); diff --git a/trunk/drivers/scsi/lpfc/lpfc_scsi.c b/trunk/drivers/scsi/lpfc/lpfc_scsi.c index 7dc4c2e6bed2..f93799873721 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_scsi.c +++ b/trunk/drivers/scsi/lpfc/lpfc_scsi.c @@ -629,7 +629,8 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq; IOCB_t *piocb; struct fcp_cmnd *fcp_cmnd; - struct lpfc_rport_data *rdata = lpfc_cmd->rdata; + struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device; + struct lpfc_rport_data *rdata = scsi_dev->hostdata; struct lpfc_nodelist *ndlp = rdata->pnode; if ((ndlp == NULL) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) { @@ -664,18 +665,56 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba, piocb->ulpTimeout = lpfc_cmd->timeout; } + lpfc_cmd->rdata = rdata; + + switch (task_mgmt_cmd) { + case FCP_LUN_RESET: + /* Issue LUN Reset to TGT LUN */ + lpfc_printf_log(phba, + KERN_INFO, + LOG_FCP, + "%d:0703 Issue LUN Reset to TGT %d LUN %d " + "Data: x%x x%x\n", + phba->brd_no, + scsi_dev->id, scsi_dev->lun, + ndlp->nlp_rpi, ndlp->nlp_flag); + + break; + case FCP_ABORT_TASK_SET: + /* Issue Abort Task Set to TGT LUN */ + lpfc_printf_log(phba, + KERN_INFO, + LOG_FCP, + "%d:0701 Issue Abort Task Set to TGT %d LUN %d " + "Data: x%x x%x\n", + phba->brd_no, + scsi_dev->id, scsi_dev->lun, + ndlp->nlp_rpi, ndlp->nlp_flag); + + break; + case FCP_TARGET_RESET: + /* Issue Target Reset to TGT */ + lpfc_printf_log(phba, + KERN_INFO, + LOG_FCP, + "%d:0702 Issue Target Reset to TGT %d " + "Data: x%x x%x\n", + phba->brd_no, + scsi_dev->id, ndlp->nlp_rpi, + ndlp->nlp_flag); + break; + } + return (1); } static int -lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, - unsigned tgt_id, struct lpfc_rport_data *rdata) +lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba) { struct lpfc_iocbq *iocbq; struct lpfc_iocbq *iocbqrsp; int ret; - lpfc_cmd->rdata = rdata; ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET); if (!ret) return FAILED; @@ -687,13 +726,6 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba, if (!iocbqrsp) return FAILED; - /* Issue Target Reset to TGT */ - lpfc_printf_log(phba, KERN_INFO, LOG_FCP, - "%d:0702 Issue Target Reset to TGT %d " - "Data: x%x x%x\n", - phba->brd_no, tgt_id, rdata->pnode->nlp_rpi, - rdata->pnode->nlp_flag); - ret = lpfc_sli_issue_iocb_wait(phba, &phba->sli.ring[phba->sli.fcp_ring], iocbq, iocbqrsp, lpfc_cmd->timeout); @@ -989,7 +1021,6 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) lpfc_cmd->pCmd = cmnd; lpfc_cmd->timeout = 60; lpfc_cmd->scsi_hba = phba; - lpfc_cmd->rdata = rdata; ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET); if (!ret) @@ -1002,11 +1033,6 @@ lpfc_reset_lun_handler(struct scsi_cmnd *cmnd) if (iocbqrsp == NULL) goto out_free_scsi_buf; - lpfc_printf_log(phba, KERN_INFO, LOG_FCP, - "%d:0703 Issue LUN Reset to TGT %d LUN %d " - "Data: x%x x%x\n", phba->brd_no, cmnd->device->id, - cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); - ret = lpfc_sli_issue_iocb_wait(phba, &phba->sli.ring[phba->sli.fcp_ring], iocbq, iocbqrsp, lpfc_cmd->timeout); @@ -1078,6 +1104,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) int match; int ret = FAILED, i, err_count = 0; int cnt, loopcnt; + unsigned int midlayer_id = 0; struct lpfc_scsi_buf * lpfc_cmd; 
lpfc_block_requests(phba); @@ -1097,6 +1124,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) * targets known to the driver. Should any target reset * fail, this routine returns failure to the midlayer. */ + midlayer_id = cmnd->device->id; for (i = 0; i < MAX_FCP_TARGET; i++) { /* Search the mapped list for this target ID */ match = 0; @@ -1109,8 +1137,9 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) if (!match) continue; - ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba, - i, ndlp->rport->dd_data); + lpfc_cmd->pCmd->device->id = i; + lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data; + ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba); if (ret != SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "%d:0713 Bus Reset on target %d failed\n", @@ -1129,6 +1158,7 @@ lpfc_reset_bus_handler(struct scsi_cmnd *cmnd) * the targets. Unfortunately, some targets do not abide by * this forcing the driver to double check. */ + cmnd->device->id = midlayer_id; cnt = lpfc_sli_sum_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring], 0, 0, LPFC_CTX_HOST); if (cnt) diff --git a/trunk/drivers/scsi/lpfc/lpfc_version.h b/trunk/drivers/scsi/lpfc/lpfc_version.h index 6b737568b831..4cf1366108b7 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_version.h +++ b/trunk/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.1.6" +#define LPFC_DRIVER_VERSION "8.1.4" #define LPFC_DRIVER_NAME "lpfc" diff --git a/trunk/drivers/scsi/megaraid.c b/trunk/drivers/scsi/megaraid.c index de35ffe2f79d..80b68a2481b3 100644 --- a/trunk/drivers/scsi/megaraid.c +++ b/trunk/drivers/scsi/megaraid.c @@ -4471,6 +4471,7 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru) { Scsi_Cmnd *scmd; struct scsi_device *sdev; + unsigned long flags = 0; scb_t *scb; int rval; diff --git a/trunk/drivers/scsi/megaraid/megaraid_mbox.c b/trunk/drivers/scsi/megaraid/megaraid_mbox.c index bec1424eda85..c11e5ce6865e 100644 --- a/trunk/drivers/scsi/megaraid/megaraid_mbox.c +++ b/trunk/drivers/scsi/megaraid/megaraid_mbox.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. 
* * FILE : megaraid_mbox.c - * Version : v2.20.4.8 (Apr 11 2006) + * Version : v2.20.4.7 (Nov 14 2005) * * Authors: * Atul Mukker @@ -2278,7 +2278,6 @@ megaraid_mbox_dpc(unsigned long devp) unsigned long flags; uint8_t c; int status; - uioc_t *kioc; if (!adapter) return; @@ -2321,9 +2320,6 @@ megaraid_mbox_dpc(unsigned long devp) // remove from local clist list_del_init(&scb->list); - kioc = (uioc_t *)scb->gp; - kioc->status = 0; - megaraid_mbox_mm_done(adapter, scb); continue; @@ -2640,7 +2636,6 @@ megaraid_reset_handler(struct scsi_cmnd *scp) int recovery_window; int recovering; int i; - uioc_t *kioc; adapter = SCP2ADAPTER(scp); raid_dev = ADAP2RAIDDEV(adapter); @@ -2660,51 +2655,32 @@ megaraid_reset_handler(struct scsi_cmnd *scp) // Also, reset all the commands currently owned by the driver spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags); list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) { - list_del_init(&scb->list); // from pending list - - if (scb->sno >= MBOX_MAX_SCSI_CMDS) { - con_log(CL_ANN, (KERN_WARNING - "megaraid: IOCTL packet with %d[%d:%d] being reset\n", - scb->sno, scb->dev_channel, scb->dev_target)); - - scb->status = -1; - kioc = (uioc_t *)scb->gp; - kioc->status = -EFAULT; + list_del_init(&scb->list); // from pending list - megaraid_mbox_mm_done(adapter, scb); - } else { - if (scb->scp == scp) { // Found command - con_log(CL_ANN, (KERN_WARNING - "megaraid: %ld:%d[%d:%d], reset from pending list\n", - scp->serial_number, scb->sno, - scb->dev_channel, scb->dev_target)); - } else { - con_log(CL_ANN, (KERN_WARNING - "megaraid: IO packet with %d[%d:%d] being reset\n", - scb->sno, scb->dev_channel, scb->dev_target)); - } + con_log(CL_ANN, (KERN_WARNING + "megaraid: %ld:%d[%d:%d], reset from pending list\n", + scp->serial_number, scb->sno, + scb->dev_channel, scb->dev_target)); - scb->scp->result = (DID_RESET << 16); - scb->scp->scsi_done(scb->scp); + scp->result = (DID_RESET << 16); + scp->scsi_done(scp); - megaraid_dealloc_scb(adapter, scb); - } + megaraid_dealloc_scb(adapter, scb); } spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags); if (adapter->outstanding_cmds) { con_log(CL_ANN, (KERN_NOTICE "megaraid: %d outstanding commands. 
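/*
 * The megaraid_reset_handler() hunk above walks adapter->pend_list while
 * unlinking entries, which is what the _safe list iterator is for: "tmp"
 * caches the next node before the current one is deleted.  Minimal sketch
 * with the driver's own types; what is done with each scb after unlinking
 * is elided here.
 */
static void my_drain_pending(adapter_t *adapter)
{
        scb_t *scb, *tmp;
        unsigned long flags;

        spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
        list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
                list_del_init(&scb->list);      /* safe: tmp already points past scb */
                /* complete or recycle scb here, as the hunk above does */
        }
        spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
}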
Max wait %d sec\n", - adapter->outstanding_cmds, - (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT))); + adapter->outstanding_cmds, MBOX_RESET_WAIT)); } recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; recovering = adapter->outstanding_cmds; - for (i = 0; i < recovery_window; i++) { + for (i = 0; i < recovery_window && adapter->outstanding_cmds; i++) { megaraid_ack_sequence(adapter); @@ -2713,11 +2689,12 @@ megaraid_reset_handler(struct scsi_cmnd *scp) con_log(CL_ANN, ( "megaraid mbox: Wait for %d commands to complete:%d\n", adapter->outstanding_cmds, - (MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i)); + MBOX_RESET_WAIT - i)); } // bailout if no recovery happended in reset time - if (adapter->outstanding_cmds == 0) { + if ((i == MBOX_RESET_WAIT) && + (recovering == adapter->outstanding_cmds)) { break; } @@ -2941,13 +2918,12 @@ mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[]) wmb(); WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1); - for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) { + for (i = 0; i < 0xFFFFF; i++) { if (mbox->numstatus != 0xFF) break; rmb(); - udelay(MBOX_SYNC_DELAY_200); } - if (i == MBOX_SYNC_WAIT_CNT) { + if (i == 0xFFFFF) { // We may need to re-calibrate the counter con_log(CL_ANN, (KERN_CRIT "megaraid: fast sync command timed out\n")); @@ -3499,7 +3475,7 @@ megaraid_cmm_register(adapter_t *adapter) adp.drvr_data = (unsigned long)adapter; adp.pdev = adapter->pdev; adp.issue_uioc = megaraid_mbox_mm_handler; - adp.timeout = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT; + adp.timeout = 300; adp.max_kioc = MBOX_MAX_USER_CMDS; if ((rval = mraid_mm_register_adp(&adp)) != 0) { @@ -3726,6 +3702,7 @@ megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb) unsigned long flags; kioc = (uioc_t *)scb->gp; + kioc->status = 0; mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf; mbox64->mbox32.status = scb->status; raw_mbox = (uint8_t *)&mbox64->mbox32; diff --git a/trunk/drivers/scsi/megaraid/megaraid_mbox.h b/trunk/drivers/scsi/megaraid/megaraid_mbox.h index 868fb0ec93e7..882fb1a0b575 100644 --- a/trunk/drivers/scsi/megaraid/megaraid_mbox.h +++ b/trunk/drivers/scsi/megaraid/megaraid_mbox.h @@ -21,8 +21,8 @@ #include "megaraid_ioctl.h" -#define MEGARAID_VERSION "2.20.4.8" -#define MEGARAID_EXT_VERSION "(Release Date: Mon Apr 11 12:27:22 EST 2006)" +#define MEGARAID_VERSION "2.20.4.7" +#define MEGARAID_EXT_VERSION "(Release Date: Mon Nov 14 12:27:22 EST 2005)" /* @@ -100,9 +100,6 @@ #define MBOX_BUSY_WAIT 10 // max usec to wait for busy mailbox #define MBOX_RESET_WAIT 180 // wait these many seconds in reset #define MBOX_RESET_EXT_WAIT 120 // extended wait reset -#define MBOX_SYNC_WAIT_CNT 0xFFFF // wait loop index for synchronous mode - -#define MBOX_SYNC_DELAY_200 200 // 200 micro-seconds /* * maximum transfer that can happen through the firmware commands issued diff --git a/trunk/drivers/scsi/megaraid/megaraid_mm.c b/trunk/drivers/scsi/megaraid/megaraid_mm.c index e8f534fb336b..8f3ce0432295 100644 --- a/trunk/drivers/scsi/megaraid/megaraid_mm.c +++ b/trunk/drivers/scsi/megaraid/megaraid_mm.c @@ -898,8 +898,10 @@ mraid_mm_register_adp(mraid_mmadp_t *lld_adp) adapter = kmalloc(sizeof(mraid_mmadp_t), GFP_KERNEL); - if (!adapter) - return -ENOMEM; + if (!adapter) { + rval = -ENOMEM; + goto memalloc_error; + } memset(adapter, 0, sizeof(mraid_mmadp_t)); diff --git a/trunk/drivers/scsi/qla2xxx/qla_os.c b/trunk/drivers/scsi/qla2xxx/qla_os.c index 584fe5d8e507..017729c59a49 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_os.c +++ b/trunk/drivers/scsi/qla2xxx/qla_os.c @@ -599,7 +599,6 @@ 
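/*
 * Sketch of the bounded busy-wait in mbox_post_sync_cmd_fast() above: poll
 * the status byte the firmware writes, re-read it after a barrier each
 * pass, and give up after a fixed number of iterations so dead firmware
 * cannot hang the caller.  The constants mirror the ones the hunk removes;
 * the helper itself is illustrative.
 */
#define MY_SYNC_WAIT_CNT  0xFFFF
#define MY_SYNC_DELAY_US  200

static int my_wait_for_status(volatile u8 *numstatus)
{
        int i;

        for (i = 0; i < MY_SYNC_WAIT_CNT; i++) {
                if (*numstatus != 0xFF)         /* firmware posted a status */
                        return 0;
                rmb();                          /* don't reorder reads around the poll */
                udelay(MY_SYNC_DELAY_US);
        }
        return -ETIMEDOUT;                      /* counter may need re-calibration */
}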
qla2x00_wait_for_loop_ready(scsi_qla_host_t *ha) * Either SUCCESS or FAILED. * * Note: -* Only return FAILED if command not returned by firmware. **************************************************************************/ int qla2xxx_eh_abort(struct scsi_cmnd *cmd) @@ -610,12 +609,11 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) unsigned int id, lun; unsigned long serial; unsigned long flags; - int wait = 0; if (!CMD_SP(cmd)) - return SUCCESS; + return FAILED; - ret = SUCCESS; + ret = FAILED; id = cmd->device->id; lun = cmd->device->lun; @@ -644,7 +642,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) } else { DEBUG3(printk("%s(%ld): abort_command " "mbx success.\n", __func__, ha->host_no)); - wait = 1; + ret = SUCCESS; } spin_lock_irqsave(&ha->hardware_lock, flags); @@ -653,18 +651,17 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Wait for the command to be returned. */ - if (wait) { + if (ret == SUCCESS) { if (qla2x00_eh_wait_on_command(ha, cmd) != QLA_SUCCESS) { qla_printk(KERN_ERR, ha, "scsi(%ld:%d:%d): Abort handler timed out -- %lx " "%x.\n", ha->host_no, id, lun, serial, ret); - ret = FAILED; } } qla_printk(KERN_INFO, ha, - "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n", - ha->host_no, id, lun, wait, serial, ret); + "scsi(%ld:%d:%d): Abort command issued -- %lx %x.\n", ha->host_no, + id, lun, serial, ret); return ret; } @@ -1703,8 +1700,8 @@ qla2x00_free_device(scsi_qla_host_t *ha) ha->flags.online = 0; /* Detach interrupts */ - if (ha->host->irq) - free_irq(ha->host->irq, ha); + if (ha->pdev->irq) + free_irq(ha->pdev->irq, ha); /* release io space registers */ if (ha->iobase) diff --git a/trunk/drivers/scsi/scsi_devinfo.c b/trunk/drivers/scsi/scsi_devinfo.c index 941c1e15c899..c750d3399a97 100644 --- a/trunk/drivers/scsi/scsi_devinfo.c +++ b/trunk/drivers/scsi/scsi_devinfo.c @@ -56,8 +56,6 @@ static struct { {"DENON", "DRD-25X", "V", BLIST_NOLUN}, /* locks up */ {"HITACHI", "DK312C", "CM81", BLIST_NOLUN}, /* responds to all lun */ {"HITACHI", "DK314C", "CR21", BLIST_NOLUN}, /* responds to all lun */ - {"IBM", "2104-DU3", NULL, BLIST_NOLUN}, /* locks up */ - {"IBM", "2104-TU3", NULL, BLIST_NOLUN}, /* locks up */ {"IMS", "CDD521/10", "2.06", BLIST_NOLUN}, /* locks up */ {"MAXTOR", "XT-3280", "PR02", BLIST_NOLUN}, /* locks up */ {"MAXTOR", "XT-4380S", "B3C", BLIST_NOLUN}, /* locks up */ diff --git a/trunk/drivers/scsi/scsi_lib.c b/trunk/drivers/scsi/scsi_lib.c index 764a8b375ead..7b0f9a3810d2 100644 --- a/trunk/drivers/scsi/scsi_lib.c +++ b/trunk/drivers/scsi/scsi_lib.c @@ -1067,29 +1067,16 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes, break; case NOT_READY: /* - * If the device is in the process of becoming - * ready, or has a temporary blockage, retry. + * If the device is in the process of becoming ready, + * retry. 
*/ - if (sshdr.asc == 0x04) { - switch (sshdr.ascq) { - case 0x01: /* becoming ready */ - case 0x04: /* format in progress */ - case 0x05: /* rebuild in progress */ - case 0x06: /* recalculation in progress */ - case 0x07: /* operation in progress */ - case 0x08: /* Long write in progress */ - case 0x09: /* self test in progress */ - scsi_requeue_command(q, cmd); - return; - default: - break; - } + if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) { + scsi_requeue_command(q, cmd); + return; } - if (!(req->flags & REQ_QUIET)) { + if (!(req->flags & REQ_QUIET)) scmd_printk(KERN_INFO, cmd, - "Device not ready: "); - scsi_print_sense_hdr("", &sshdr); - } + "Device not ready.\n"); scsi_end_request(cmd, 0, this_count, 1); return; case VOLUME_OVERFLOW: diff --git a/trunk/drivers/scsi/sim710.c b/trunk/drivers/scsi/sim710.c index 255886a9ac55..3274ab76c8d3 100644 --- a/trunk/drivers/scsi/sim710.c +++ b/trunk/drivers/scsi/sim710.c @@ -75,7 +75,7 @@ param_setup(char *str) else if(!strncmp(pos, "id:", 3)) { if(slot == -1) { printk(KERN_WARNING "sim710: Must specify slot for id parameter\n"); - } else if(slot >= MAX_SLOTS) { + } else if(slot > MAX_SLOTS) { printk(KERN_WARNING "sim710: Illegal slot %d for id %d\n", slot, val); } else { id_array[slot] = val; diff --git a/trunk/include/linux/dma-mapping.h b/trunk/include/linux/dma-mapping.h index 635690cf3e3d..ff61817082fa 100644 --- a/trunk/include/linux/dma-mapping.h +++ b/trunk/include/linux/dma-mapping.h @@ -14,7 +14,6 @@ enum dma_data_direction { }; #define DMA_64BIT_MASK 0xffffffffffffffffULL -#define DMA_48BIT_MASK 0x0000ffffffffffffULL #define DMA_40BIT_MASK 0x000000ffffffffffULL #define DMA_39BIT_MASK 0x0000007fffffffffULL #define DMA_32BIT_MASK 0x00000000ffffffffULL diff --git a/trunk/include/scsi/srp.h b/trunk/include/scsi/srp.h index 637f77eccf0c..6c2681dc5b46 100644 --- a/trunk/include/scsi/srp.h +++ b/trunk/include/scsi/srp.h @@ -95,15 +95,14 @@ struct srp_direct_buf { /* * We need the packed attribute because the SRP spec puts the list of - * descriptors at an offset of 20, which is not aligned to the size of - * struct srp_direct_buf. The whole structure must be packed to avoid - * having the 20-byte structure padded to 24 bytes on 64-bit architectures. + * descriptors at an offset of 20, which is not aligned to the size + * of struct srp_direct_buf. */ struct srp_indirect_buf { struct srp_direct_buf table_desc; __be32 len; - struct srp_direct_buf desc_list[0]; -} __attribute__((packed)); + struct srp_direct_buf desc_list[0] __attribute__((packed)); +}; enum { SRP_MULTICHAN_SINGLE = 0, @@ -123,11 +122,6 @@ struct srp_login_req { u8 target_port_id[16]; }; -/* - * The SRP spec defines the size of the LOGIN_RSP structure to be 52 - * bytes, so it needs to be packed to avoid having it padded to 56 - * bytes on 64-bit architectures. - */ struct srp_login_rsp { u8 opcode; u8 reserved1[3]; @@ -138,7 +132,7 @@ struct srp_login_rsp { __be16 buf_fmt; u8 rsp_flags; u8 reserved2[25]; -} __attribute__((packed)); +}; struct srp_login_rej { u8 opcode; @@ -213,11 +207,6 @@ enum { SRP_RSP_FLAG_DIUNDER = 1 << 5 }; -/* - * The SRP spec defines the size of the RSP structure to be 36 bytes, - * so it needs to be packed to avoid having it padded to 40 bytes on - * 64-bit architectures. 
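/*
 * The scsi_lib.c hunk above narrows which NOT_READY sense codes get the
 * command requeued.  The helper below simply tabulates the ASC 0x04
 * ("logical unit not ready") ASCQ values that the longer switch treated as
 * transient; it is an illustration of the decision, not kernel code.
 */
#include <stdbool.h>

static bool my_not_ready_is_transient(unsigned char asc, unsigned char ascq)
{
        if (asc != 0x04)
                return false;

        switch (ascq) {
        case 0x01:      /* in process of becoming ready */
        case 0x04:      /* format in progress */
        case 0x05:      /* rebuild in progress */
        case 0x06:      /* recalculation in progress */
        case 0x07:      /* operation in progress */
        case 0x08:      /* long write in progress */
        case 0x09:      /* self test in progress */
                return true;
        default:
                return false;
        }
}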
- */
 struct srp_rsp {
 	u8	opcode;
 	u8	sol_not;
@@ -232,6 +221,6 @@ struct srp_rsp {
 	__be32	sense_data_len;
 	__be32	resp_data_len;
 	u8	data[0];
-} __attribute__((packed));
+};
 
 #endif /* SCSI_SRP_H */
diff --git a/trunk/kernel/ptrace.c b/trunk/kernel/ptrace.c
index 921c22ad16e4..b0f8da80d7d4 100644
--- a/trunk/kernel/ptrace.c
+++ b/trunk/kernel/ptrace.c
@@ -155,26 +155,8 @@ int ptrace_attach(struct task_struct *task)
 	if (task->tgid == current->tgid)
 		goto out;
 
-repeat:
-	/*
-	 * Nasty, nasty.
-	 *
-	 * We want to hold both the task-lock and the
-	 * tasklist_lock for writing at the same time.
-	 * But that's against the rules (tasklist_lock
-	 * is taken for reading by interrupts on other
-	 * cpu's that may have task_lock).
-	 */
+	write_lock_irq(&tasklist_lock);
 	task_lock(task);
-	local_irq_disable();
-	if (!write_trylock(&tasklist_lock)) {
-		local_irq_enable();
-		task_unlock(task);
-		do {
-			cpu_relax();
-		} while (!write_can_lock(&tasklist_lock));
-		goto repeat;
-	}
 
 	/* the same process cannot be attached many times */
 	if (task->ptrace & PT_PTRACED
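/*
 * General illustration of what both versions of the ptrace_attach() hunk
 * above wrestle with: a path that needs two locks at once must either take
 * them in one agreed order everywhere or fall back to a trylock-and-retry
 * loop.  The standalone pthread sketch below shows the fixed-order variant;
 * it is not kernel code and the names are made up.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;  /* "outer" lock */
static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;  /* "inner" lock */

static int attach_once(int *ptraced)
{
        int ret = -1;

        /* every path takes list_lock before task_lock, so no deadlock */
        pthread_mutex_lock(&list_lock);
        pthread_mutex_lock(&task_lock);

        if (!*ptraced) {                /* the same task cannot be attached twice */
                *ptraced = 1;
                ret = 0;
        }

        pthread_mutex_unlock(&task_lock);
        pthread_mutex_unlock(&list_lock);
        return ret;
}

int main(void)
{
        int ptraced = 0;

        printf("first attach:  %d\n", attach_once(&ptraced));
        printf("second attach: %d\n", attach_once(&ptraced));
        return 0;
}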