From afcf0a20822b7bbe906971f647475a6b91c0db0a Mon Sep 17 00:00:00 2001
From: KAMEZAWA Hiroyuki
Date: Fri, 12 Jun 2009 10:33:53 +0300
Subject: [PATCH]

--- yaml ---
r: 148168
b: refs/heads/master
c: ca371c0d7e23d0d0afae65fc83a0e91cf7399573
h: refs/heads/master
v: v3
---
 [refs] | 2 +-
 trunk/Documentation/ide/ide.txt | 2 -
 trunk/Documentation/kernel-parameters.txt | 7 +-
 trunk/Documentation/lguest/Makefile | 3 +-
 trunk/Documentation/lguest/lguest.c | 1008 ++-
 trunk/Documentation/lguest/lguest.txt | 1 +
 trunk/arch/alpha/mm/extable.c | 21 -
 trunk/arch/avr32/kernel/module.c | 2 +
 trunk/arch/cris/kernel/module.c | 2 +
 trunk/arch/frv/kernel/module.c | 2 +
 trunk/arch/h8300/kernel/module.c | 2 +
 trunk/arch/ia64/mm/extable.c | 26 -
 trunk/arch/m32r/kernel/module.c | 2 +
 trunk/arch/m68k/kernel/module.c | 2 +
 trunk/arch/m68knommu/kernel/module.c | 2 +
 trunk/arch/mips/kernel/module.c | 2 +
 trunk/arch/mn10300/kernel/module.c | 2 +
 trunk/arch/parisc/kernel/module.c | 2 +
 trunk/arch/powerpc/kernel/module.c | 2 +
 trunk/arch/s390/kernel/module.c | 2 +
 trunk/arch/sh/kernel/module.c | 2 +
 trunk/arch/sparc/include/asm/uaccess_32.h | 3 -
 trunk/arch/sparc/kernel/module.c | 2 +
 trunk/arch/sparc/mm/extable.c | 29 -
 trunk/arch/um/include/asm/pgtable.h | 7 +-
 trunk/arch/um/sys-i386/Makefile | 2 +-
 trunk/arch/um/sys-x86_64/Makefile | 4 +-
 trunk/arch/um/sys-x86_64/um_module.c | 21 +
 trunk/arch/x86/include/asm/lguest.h | 7 +-
 trunk/arch/x86/include/asm/lguest_hcall.h | 15 +-
 trunk/arch/x86/include/asm/pgtable_32_types.h | 4 -
 trunk/arch/x86/kernel/Makefile | 2 +-
 trunk/arch/x86/kernel/asm-offsets_32.c | 1 -
 trunk/arch/x86/kernel/module_32.c | 152 +
 .../arch/x86/kernel/{module.c => module_64.c} | 82 +-
 trunk/arch/x86/kernel/setup.c | 15 +-
 trunk/arch/x86/kernel/vmlinux.lds.S | 2 -
 trunk/arch/x86/lguest/Kconfig | 1 +
 trunk/arch/x86/lguest/boot.c | 158 +-
 trunk/arch/x86/lguest/i386_head.S | 60 +-
 trunk/arch/xtensa/kernel/module.c | 2 +
 trunk/drivers/base/firmware_class.c | 129 +-
 trunk/drivers/block/virtio_blk.c | 10 +-
 trunk/drivers/char/hw_random/virtio-rng.c | 30 +-
 trunk/drivers/char/virtio_console.c | 26 +-
 trunk/drivers/ide/at91_ide.c | 7 +-
 trunk/drivers/ide/au1xxx-ide.c | 8 +-
 trunk/drivers/ide/buddha.c | 9 +-
 trunk/drivers/ide/cmd640.c | 7 +-
 trunk/drivers/ide/cs5520.c | 4 +-
 trunk/drivers/ide/delkin_cb.c | 6 +-
 trunk/drivers/ide/falconide.c | 9 +-
 trunk/drivers/ide/gayle.c | 9 +-
 trunk/drivers/ide/hpt366.c | 25 +-
 trunk/drivers/ide/icside.c | 77 +-
 trunk/drivers/ide/ide-4drives.c | 6 +-
 trunk/drivers/ide/ide-atapi.c | 2 +-
 trunk/drivers/ide/ide-cs.c | 6 +-
 trunk/drivers/ide/ide-disk.c | 75 +-
 trunk/drivers/ide/ide-dma.c | 1 +
 trunk/drivers/ide/ide-eh.c | 14 +-
 trunk/drivers/ide/ide-gd.c | 14 -
 trunk/drivers/ide/ide-generic.c | 7 +-
 trunk/drivers/ide/ide-h8300.c | 10 +-
 trunk/drivers/ide/ide-io.c | 77 +-
 trunk/drivers/ide/ide-iops.c | 26 +-
 trunk/drivers/ide/ide-legacy.c | 7 +-
 trunk/drivers/ide/ide-pnp.c | 6 +-
 trunk/drivers/ide/ide-probe.c | 95 +-
 trunk/drivers/ide/ide-tape.c | 90 +-
 trunk/drivers/ide/ide-taskfile.c | 3 +-
 trunk/drivers/ide/ide.c | 10 -
 trunk/drivers/ide/ide_platform.c | 9 +-
 trunk/drivers/ide/macide.c | 9 +-
 trunk/drivers/ide/palm_bk3710.c | 6 +-
 trunk/drivers/ide/pdc202xx_new.c | 26 +
 trunk/drivers/ide/pdc202xx_old.c | 92 +-
 trunk/drivers/ide/pmac.c | 13 +-
 trunk/drivers/ide/q40ide.c | 11 +-
 trunk/drivers/ide/rapide.c | 8 +-
 trunk/drivers/ide/scc_pata.c | 6 +-
 trunk/drivers/ide/setup-pci.c | 85 +-
 trunk/drivers/ide/sgiioc4.c | 7 +-
 trunk/drivers/ide/siimage.c | 4 +-
 trunk/drivers/ide/sl82c105.c | 9 +-
 trunk/drivers/ide/tx4938ide.c | 5 +-
 trunk/drivers/ide/tx4939ide.c | 5 +-
 .../drivers/infiniband/ulp/iser/iscsi_iser.c | 10 +-
 trunk/drivers/lguest/Kconfig | 2 +-
 trunk/drivers/lguest/core.c | 30 +-
 trunk/drivers/lguest/hypercalls.c | 14 -
 trunk/drivers/lguest/interrupts_and_traps.c | 57 +-
 trunk/drivers/lguest/lg.h | 28 +-
 trunk/drivers/lguest/lguest_device.c | 41 +-
 trunk/drivers/lguest/lguest_user.c | 127 +-
 trunk/drivers/lguest/page_tables.c | 396 +-
 trunk/drivers/lguest/segments.c | 2 +-
 trunk/drivers/message/fusion/mptbase.c | 1571 ++--
 trunk/drivers/message/fusion/mptbase.h | 180 +-
 trunk/drivers/message/fusion/mptctl.c | 692 +-
 trunk/drivers/message/fusion/mptdebug.h | 3 -
 trunk/drivers/message/fusion/mptfc.c | 15 +-
 trunk/drivers/message/fusion/mptsas.c | 3136 ++------
 trunk/drivers/message/fusion/mptsas.h | 41 +-
 trunk/drivers/message/fusion/mptscsih.c | 1329 ++--
 trunk/drivers/message/fusion/mptscsih.h | 7 +-
 trunk/drivers/message/fusion/mptspi.c | 71 +-
 trunk/drivers/net/Kconfig | 11 -
 trunk/drivers/net/Makefile | 1 -
 trunk/drivers/net/bnx2.c | 193 +-
 trunk/drivers/net/bnx2.h | 18 -
 trunk/drivers/net/cnic.c | 2711 -------
 trunk/drivers/net/cnic.h | 299 -
 trunk/drivers/net/cnic_defs.h | 580 --
 trunk/drivers/net/cnic_if.h | 299 -
 trunk/drivers/net/virtio_net.c | 45 +-
 trunk/drivers/s390/kvm/kvm_virtio.c | 43 +-
 trunk/drivers/s390/scsi/zfcp_ccw.c | 30 +-
 trunk/drivers/s390/scsi/zfcp_dbf.c | 10 +-
 trunk/drivers/s390/scsi/zfcp_def.h | 7 +
 trunk/drivers/s390/scsi/zfcp_erp.c | 8 +-
 trunk/drivers/s390/scsi/zfcp_ext.h | 1 -
 trunk/drivers/s390/scsi/zfcp_fc.c | 7 +-
 trunk/drivers/s390/scsi/zfcp_fsf.c | 29 +-
 trunk/drivers/s390/scsi/zfcp_scsi.c | 13 +-
 trunk/drivers/scsi/Kconfig | 31 +-
 trunk/drivers/scsi/Makefile | 3 +-
 trunk/drivers/scsi/NCR_D700.c | 2 +-
 .../drivers/scsi/bnx2i/57xx_iscsi_constants.h | 155 -
 trunk/drivers/scsi/bnx2i/57xx_iscsi_hsi.h | 1509 ----
 trunk/drivers/scsi/bnx2i/Kconfig | 7 -
 trunk/drivers/scsi/bnx2i/Makefile | 3 -
 trunk/drivers/scsi/bnx2i/bnx2i.h | 771 --
 trunk/drivers/scsi/bnx2i/bnx2i_hwi.c | 2405 ------
 trunk/drivers/scsi/bnx2i/bnx2i_init.c | 438 --
 trunk/drivers/scsi/bnx2i/bnx2i_iscsi.c | 2064 -----
 trunk/drivers/scsi/bnx2i/bnx2i_sysfs.c | 142 -
 trunk/drivers/scsi/cxgb3i/cxgb3i.h | 1 +
 trunk/drivers/scsi/cxgb3i/cxgb3i_iscsi.c | 26 +-
 trunk/drivers/scsi/cxgb3i/cxgb3i_offload.c | 23 +-
 trunk/drivers/scsi/cxgb3i/cxgb3i_offload.h | 3 +-
 .../scsi/device_handler/scsi_dh_rdac.c | 6 -
 trunk/drivers/scsi/fcoe/fcoe.c | 95 +-
 trunk/drivers/scsi/fcoe/fcoe.h | 1 -
 trunk/drivers/scsi/fcoe/libfcoe.c | 21 +-
 trunk/drivers/scsi/fnic/fnic_main.c | 1 -
 trunk/drivers/scsi/gdth_proc.c | 5 +-
 trunk/drivers/scsi/ibmvscsi/ibmvfc.c | 434 +-
 trunk/drivers/scsi/ibmvscsi/ibmvfc.h | 40 +-
 trunk/drivers/scsi/ibmvscsi/ibmvscsi.c | 463 +-
 trunk/drivers/scsi/ibmvscsi/ibmvscsi.h | 4 -
 trunk/drivers/scsi/ibmvscsi/viosrp.h | 68 +-
 trunk/drivers/scsi/ipr.c | 5 +-
 trunk/drivers/scsi/libfc/fc_exch.c | 4 -
 trunk/drivers/scsi/libfc/fc_fcp.c | 2 +-
 trunk/drivers/scsi/libfc/fc_rport.c | 6 +-
 trunk/drivers/scsi/libiscsi.c | 468 +-
 trunk/drivers/scsi/libiscsi_tcp.c | 18 +-
 trunk/drivers/scsi/lpfc/lpfc.h | 123 +-
 trunk/drivers/scsi/lpfc/lpfc_attr.c | 250 +-
 trunk/drivers/scsi/lpfc/lpfc_crtn.h | 63 +-
 trunk/drivers/scsi/lpfc/lpfc_ct.c | 15 +-
 trunk/drivers/scsi/lpfc/lpfc_debugfs.c | 21 +-
 trunk/drivers/scsi/lpfc/lpfc_disc.h | 1 -
 trunk/drivers/scsi/lpfc/lpfc_els.c | 275 +-
 trunk/drivers/scsi/lpfc/lpfc_hbadisc.c | 1365 +---
 trunk/drivers/scsi/lpfc/lpfc_hw.h | 142 +-
 trunk/drivers/scsi/lpfc/lpfc_hw4.h | 2141 ------
 trunk/drivers/scsi/lpfc/lpfc_init.c | 6050 ++-------------
 trunk/drivers/scsi/lpfc/lpfc_logmsg.h | 54 +-
 trunk/drivers/scsi/lpfc/lpfc_mbox.c | 674 +-
 trunk/drivers/scsi/lpfc/lpfc_mem.c | 206 +-
 trunk/drivers/scsi/lpfc/lpfc_nportdisc.c | 51 +-
 trunk/drivers/scsi/lpfc/lpfc_scsi.c | 930 +--
 trunk/drivers/scsi/lpfc/lpfc_scsi.h | 2 -
 trunk/drivers/scsi/lpfc/lpfc_sli.c | 6813 ++---
 trunk/drivers/scsi/lpfc/lpfc_sli.h | 29 +-
 trunk/drivers/scsi/lpfc/lpfc_sli4.h | 467 --
 trunk/drivers/scsi/lpfc/lpfc_version.h | 2 +-
 trunk/drivers/scsi/lpfc/lpfc_vport.c | 62 +-
 trunk/drivers/scsi/mpt2sas/mpt2sas_base.h | 5 +-
 trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.c | 32 +-
 trunk/drivers/scsi/mpt2sas/mpt2sas_scsih.c | 363 +-
 .../drivers/scsi/mpt2sas/mpt2sas_transport.c | 36 +-
 trunk/drivers/scsi/mvsas.c | 3222 ++++
 trunk/drivers/scsi/mvsas/Kconfig | 42 -
 trunk/drivers/scsi/mvsas/Makefile | 32 -
 trunk/drivers/scsi/mvsas/mv_64xx.c | 793 --
 trunk/drivers/scsi/mvsas/mv_64xx.h | 151 -
 trunk/drivers/scsi/mvsas/mv_94xx.c | 672 --
 trunk/drivers/scsi/mvsas/mv_94xx.h | 222 -
 trunk/drivers/scsi/mvsas/mv_chips.h | 280 -
 trunk/drivers/scsi/mvsas/mv_defs.h | 502 --
 trunk/drivers/scsi/mvsas/mv_init.c | 703 --
 trunk/drivers/scsi/mvsas/mv_sas.c | 2154 ------
 trunk/drivers/scsi/mvsas/mv_sas.h | 406 -
 trunk/drivers/scsi/osd/Kbuild | 25 +
 trunk/drivers/scsi/osd/Makefile | 37 +
 trunk/drivers/scsi/osd/osd_initiator.c | 83 +-
 trunk/drivers/scsi/osd/osd_uld.c | 66 +-
 trunk/drivers/scsi/qla1280.c | 387 +-
 trunk/drivers/scsi/qla1280.h | 3 +-
 trunk/drivers/scsi/qla2xxx/qla_attr.c | 227 +-
 trunk/drivers/scsi/qla2xxx/qla_dbg.c | 13 +-
 trunk/drivers/scsi/qla2xxx/qla_def.h | 45 +-
 trunk/drivers/scsi/qla2xxx/qla_fw.h | 6 +-
 trunk/drivers/scsi/qla2xxx/qla_gbl.h | 43 +-
 trunk/drivers/scsi/qla2xxx/qla_gs.c | 5 +-
 trunk/drivers/scsi/qla2xxx/qla_init.c | 206 +-
 trunk/drivers/scsi/qla2xxx/qla_iocb.c | 55 +-
 trunk/drivers/scsi/qla2xxx/qla_isr.c | 240 +-
 trunk/drivers/scsi/qla2xxx/qla_mbx.c | 244 +-
 trunk/drivers/scsi/qla2xxx/qla_mid.c | 118 +-
 trunk/drivers/scsi/qla2xxx/qla_os.c | 294 +-
 trunk/drivers/scsi/qla2xxx/qla_sup.c | 47 +-
 trunk/drivers/scsi/qla2xxx/qla_version.h | 2 +-
 trunk/drivers/scsi/scsi.c | 4 +-
 trunk/drivers/scsi/scsi_debug.c | 2 +-
 trunk/drivers/scsi/scsi_error.c | 21 +-
 trunk/drivers/scsi/scsi_lib.c | 14 +-
 trunk/drivers/scsi/scsi_scan.c | 4 +-
 trunk/drivers/scsi/scsi_transport_iscsi.c | 173 +-
 trunk/drivers/scsi/sd.c | 45 +-
 trunk/drivers/scsi/st.c | 2 +-
 trunk/drivers/scsi/sym53c8xx_2/sym_glue.c | 66 +-
 trunk/drivers/scsi/sym53c8xx_2/sym_hipd.c | 49 +-
 trunk/drivers/scsi/sym53c8xx_2/sym_hipd.h | 2 -
 trunk/drivers/video/aty/aty128fb.c | 2 +-
 trunk/drivers/video/cyber2000fb.c | 9 +-
 trunk/drivers/video/uvesafb.c | 10 +-
 trunk/drivers/virtio/virtio.c | 29 +-
 trunk/drivers/virtio/virtio_balloon.c | 27 +-
 trunk/drivers/virtio/virtio_pci.c | 307 +-
 trunk/drivers/virtio/virtio_ring.c | 102 +-
 trunk/fs/Kconfig | 10 -
 trunk/fs/eventfd.c | 3 -
 trunk/fs/exofs/common.h | 6 +
 trunk/fs/exofs/inode.c | 8 +-
 trunk/fs/exofs/osd.c | 26 +
 trunk/fs/fuse/Makefile | 1 -
 trunk/fs/fuse/cuse.c | 610 --
 trunk/fs/fuse/dev.c | 15 +-
 trunk/fs/fuse/dir.c | 33 +-
 trunk/fs/fuse/file.c | 346 +-
 trunk/fs/fuse/fuse_i.h | 47 +-
 trunk/fs/fuse/inode.c | 118 +-
 trunk/fs/gfs2/Makefile | 1 -
 trunk/fs/gfs2/bmap.c | 3 -
 trunk/fs/gfs2/glock.c | 12 +-
 trunk/fs/gfs2/log.c | 9 +-
 trunk/fs/gfs2/lops.c | 3 -
 trunk/fs/gfs2/ops_fstype.c | 2 -
 trunk/fs/gfs2/rgrp.c | 11 +-
 trunk/fs/gfs2/super.c | 4 +
 trunk/fs/gfs2/trace_gfs2.h | 407 -
 trunk/fs/partitions/check.c | 42 +-
 trunk/include/linux/blkdev.h | 2 -
 trunk/include/linux/compiler.h | 5 -
 trunk/include/linux/fuse.h | 31 -
 trunk/include/linux/genhd.h | 1 -
 trunk/include/linux/ide.h | 46 +-
 trunk/include/linux/if_ether.h | 1 -
 trunk/include/linux/lguest.h | 4 -
 trunk/include/linux/lguest_launcher.h | 3 +-
 trunk/include/linux/module.h | 1 -
 trunk/include/linux/moduleparam.h | 40 +-
 trunk/include/linux/page_cgroup.h | 18 +-
 trunk/include/linux/virtio.h | 15 +-
 trunk/include/linux/virtio_config.h | 49 +-
 trunk/include/linux/virtio_pci.h | 10 +-
 trunk/include/linux/virtio_ring.h | 8 +-
 trunk/include/scsi/fc/fc_fip.h | 7 +
 trunk/include/scsi/iscsi_if.h | 49 +-
 trunk/include/scsi/libfc.h | 1 -
 trunk/include/scsi/libiscsi.h | 8 +-
 trunk/include/scsi/osd_attributes.h | 74 +-
 trunk/include/scsi/osd_initiator.h | 14 +-
 trunk/include/scsi/osd_protocol.h | 8 -
 trunk/include/scsi/scsi_transport_iscsi.h | 8 +-
 trunk/init/main.c | 5 +
 trunk/kernel/module.c | 1 -
 trunk/kernel/params.c | 46 +-
 trunk/kernel/sched.c | 1 -
 trunk/lib/extable.c | 21 +-
 trunk/mm/page_cgroup.c | 29 +-
 trunk/net/9p/trans_virtio.c | 6 +-
 trunk/scripts/mod/file2alias.c | 2 +-
 287 files changed, 11469 insertions(+), 47173 deletions(-)
 create mode 100644 trunk/arch/um/sys-x86_64/um_module.c
 create mode 100644 trunk/arch/x86/kernel/module_32.c
 rename trunk/arch/x86/kernel/{module.c => module_64.c} (74%)
 delete mode 100644 trunk/drivers/net/cnic.c
 delete mode 100644 trunk/drivers/net/cnic.h
 delete mode 100644 trunk/drivers/net/cnic_defs.h
 delete mode 100644 trunk/drivers/net/cnic_if.h
 delete mode 100644 trunk/drivers/scsi/bnx2i/57xx_iscsi_constants.h
 delete mode 100644 trunk/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
 delete mode 100644 trunk/drivers/scsi/bnx2i/Kconfig
 delete mode 100644 trunk/drivers/scsi/bnx2i/Makefile
 delete mode 100644 trunk/drivers/scsi/bnx2i/bnx2i.h
 delete mode 100644 trunk/drivers/scsi/bnx2i/bnx2i_hwi.c
 delete mode 100644 trunk/drivers/scsi/bnx2i/bnx2i_init.c
 delete mode 100644 trunk/drivers/scsi/bnx2i/bnx2i_iscsi.c
 delete mode 100644 trunk/drivers/scsi/bnx2i/bnx2i_sysfs.c
 delete mode 100644 trunk/drivers/scsi/lpfc/lpfc_hw4.h
 delete mode 100644 trunk/drivers/scsi/lpfc/lpfc_sli4.h
 create mode 100644 trunk/drivers/scsi/mvsas.c
 delete mode 100644 trunk/drivers/scsi/mvsas/Kconfig
 delete mode 100644 trunk/drivers/scsi/mvsas/Makefile
 delete mode 100644 trunk/drivers/scsi/mvsas/mv_64xx.c
 delete mode 100644 trunk/drivers/scsi/mvsas/mv_64xx.h
 delete mode 100644 trunk/drivers/scsi/mvsas/mv_94xx.c
 delete mode 100644 trunk/drivers/scsi/mvsas/mv_94xx.h
 delete mode 100644 trunk/drivers/scsi/mvsas/mv_chips.h
 delete mode 100644 trunk/drivers/scsi/mvsas/mv_defs.h
 delete mode 100644 trunk/drivers/scsi/mvsas/mv_init.c
 delete mode 100644 trunk/drivers/scsi/mvsas/mv_sas.c
 delete mode 100644 trunk/drivers/scsi/mvsas/mv_sas.h
 create mode 100755 trunk/drivers/scsi/osd/Makefile
 delete mode 100644 trunk/fs/fuse/cuse.c
 delete mode 100644 trunk/fs/gfs2/trace_gfs2.h

diff --git a/[refs] b/[refs]
index fe2877991c46..c11c0090c04c 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: c9b8af00ff71f86ff3d092cc60ca673e1d0eae5b
+refs/heads/master: ca371c0d7e23d0d0afae65fc83a0e91cf7399573
diff --git a/trunk/Documentation/ide/ide.txt b/trunk/Documentation/ide/ide.txt
index e77bebfa7b0d..0c78f4b1d9d9 100644
--- a/trunk/Documentation/ide/ide.txt
+++ b/trunk/Documentation/ide/ide.txt
@@ -216,8 +216,6 @@ Other kernel parameters for ide_core are:
 
 * "noflush=[interface_number.device_number]" to disable flush requests
 
-*
"nohpa=[interface_number.device_number]" to disable Host Protected Area - * "noprobe=[interface_number.device_number]" to skip probing * "nowerr=[interface_number.device_number]" to ignore the WRERR_STAT bit diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index 0bf8a882ee9e..7bcdebffdab3 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -887,8 +887,11 @@ and is between 256 and 4096 characters. It is defined in the file ide-core.nodma= [HW] (E)IDE subsystem Format: =0.0 to prevent dma on hda, =0.1 hdb =1.0 hdc - .vlb_clock .pci_clock .noflush .nohpa .noprobe .nowerr - .cdrom .chs .ignore_cable are additional options + .vlb_clock .pci_clock .noflush .noprobe .nowerr .cdrom + .chs .ignore_cable are additional options + See Documentation/ide/ide.txt. + + idebus= [HW] (E)IDE subsystem - VLB/PCI bus speed See Documentation/ide/ide.txt. ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem diff --git a/trunk/Documentation/lguest/Makefile b/trunk/Documentation/lguest/Makefile index 28c8cdfcafd8..1f4f9e888bd1 100644 --- a/trunk/Documentation/lguest/Makefile +++ b/trunk/Documentation/lguest/Makefile @@ -1,5 +1,6 @@ # This creates the demonstration utility "lguest" which runs a Linux guest. -CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -I../../include -I../../arch/x86/include -U_FORTIFY_SOURCE +CFLAGS:=-Wall -Wmissing-declarations -Wmissing-prototypes -O3 -I../../include -I../../arch/x86/include -U_FORTIFY_SOURCE +LDLIBS:=-lz all: lguest diff --git a/trunk/Documentation/lguest/lguest.c b/trunk/Documentation/lguest/lguest.c index 9ebcd6ef361b..d36fcc0f2715 100644 --- a/trunk/Documentation/lguest/lguest.c +++ b/trunk/Documentation/lguest/lguest.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -60,6 +59,7 @@ typedef uint8_t u8; /*:*/ #define PAGE_PRESENT 0x7 /* Present, RW, Execute */ +#define NET_PEERNUM 1 #define BRIDGE_PFX "bridge:" #ifndef SIOCBRADDIF #define SIOCBRADDIF 0x89a2 /* add interface to bridge */ @@ -76,12 +76,19 @@ static bool verbose; do { if (verbose) printf(args); } while(0) /*:*/ +/* File descriptors for the Waker. */ +struct { + int pipe[2]; + int lguest_fd; +} waker_fds; + /* The pointer to the start of guest memory. */ static void *guest_base; /* The maximum guest physical address allowed, and maximum possible. */ static unsigned long guest_limit, guest_max; -/* The /dev/lguest file descriptor. */ -static int lguest_fd; +/* The pipe for signal hander to write to. */ +static int timeoutpipe[2]; +static unsigned int timeout_usec = 500; /* a per-cpu variable indicating whose vcpu is currently running */ static unsigned int __thread cpu_id; @@ -89,6 +96,11 @@ static unsigned int __thread cpu_id; /* This is our list of devices. */ struct device_list { + /* Summary information about the devices in our list: ready to pass to + * select() to ask which need servicing.*/ + fd_set infds; + int max_infd; + /* Counter to assign interrupt numbers. */ unsigned int next_irq; @@ -114,21 +126,22 @@ struct device /* The linked-list pointer. */ struct device *next; - /* The device's descriptor, as mapped into the Guest. */ + /* The this device's descriptor, as mapped into the Guest. */ struct lguest_device_desc *desc; - /* We can't trust desc values once Guest has booted: we use these. */ - unsigned int feature_len; - unsigned int num_vq; - /* The name of this device, for --verbose. 
*/ const char *name; + /* If handle_input is set, it wants to be called when this file + * descriptor is ready. */ + int fd; + bool (*handle_input)(int fd, struct device *me); + /* Any queues attached to this device */ struct virtqueue *vq; - /* Is it operational */ - bool running; + /* Handle status being finalized (ie. feature bits stable). */ + void (*ready)(struct device *me); /* Device-specific data. */ void *priv; @@ -151,28 +164,22 @@ struct virtqueue /* Last available index we saw. */ u16 last_avail_idx; - /* How many are used since we sent last irq? */ - unsigned int pending_used; + /* The routine to call when the Guest pings us, or timeout. */ + void (*handle_output)(int fd, struct virtqueue *me, bool timeout); - /* Eventfd where Guest notifications arrive. */ - int eventfd; + /* Outstanding buffers */ + unsigned int inflight; - /* Function for the thread which is servicing this virtqueue. */ - void (*service)(struct virtqueue *vq); - pid_t thread; + /* Is this blocked awaiting a timer? */ + bool blocked; }; /* Remember the arguments to the program so we can "reboot" */ static char **main_args; -/* The original tty settings to restore on exit. */ -static struct termios orig_term; - -/* We have to be careful with barriers: our devices are all run in separate - * threads and so we need to make sure that changes visible to the Guest happen - * in precise order. */ -#define wmb() __asm__ __volatile__("" : : : "memory") -#define mb() __asm__ __volatile__("" : : : "memory") +/* Since guest is UP and we don't run at the same time, we don't need barriers. + * But I include them in the code in case others copy it. */ +#define wmb() /* Convert an iovec element to the given type. * @@ -238,7 +245,7 @@ static void iov_consume(struct iovec iov[], unsigned num_iov, unsigned len) static u8 *get_feature_bits(struct device *dev) { return (u8 *)(dev->desc + 1) - + dev->num_vq * sizeof(struct lguest_vqconfig); + + dev->desc->num_vq * sizeof(struct lguest_vqconfig); } /*L:100 The Launcher code itself takes us out into userspace, that scary place @@ -498,19 +505,99 @@ static void concat(char *dst, char *args[]) * saw the arguments it expects when we looked at initialize() in lguest_user.c: * the base of Guest "physical" memory, the top physical page to allow and the * entry point for the Guest. */ -static void tell_kernel(unsigned long start) +static int tell_kernel(unsigned long start) { unsigned long args[] = { LHREQ_INITIALIZE, (unsigned long)guest_base, guest_limit / getpagesize(), start }; + int fd; + verbose("Guest: %p - %p (%#lx)\n", guest_base, guest_base + guest_limit, guest_limit); - lguest_fd = open_or_die("/dev/lguest", O_RDWR); - if (write(lguest_fd, args, sizeof(args)) < 0) + fd = open_or_die("/dev/lguest", O_RDWR); + if (write(fd, args, sizeof(args)) < 0) err(1, "Writing to /dev/lguest"); + + /* We return the /dev/lguest file descriptor to control this Guest */ + return fd; } /*:*/ +static void add_device_fd(int fd) +{ + FD_SET(fd, &devices.infds); + if (fd > devices.max_infd) + devices.max_infd = fd; +} + +/*L:200 + * The Waker. + * + * With console, block and network devices, we can have lots of input which we + * need to process. We could try to tell the kernel what file descriptors to + * watch, but handing a file descriptor mask through to the kernel is fairly + * icky. + * + * Instead, we clone off a thread which watches the file descriptors and writes + * the LHREQ_BREAK command to the /dev/lguest file descriptor to tell the Host + * stop running the Guest. 
This causes the Launcher to return from the + * /dev/lguest read with -EAGAIN, where it will write to /dev/lguest to reset + * the LHREQ_BREAK and wake us up again. + * + * This, of course, is merely a different *kind* of icky. + * + * Given my well-known antipathy to threads, I'd prefer to use processes. But + * it's easier to share Guest memory with threads, and trivial to share the + * devices.infds as the Launcher changes it. + */ +static int waker(void *unused) +{ + /* Close the write end of the pipe: only the Launcher has it open. */ + close(waker_fds.pipe[1]); + + for (;;) { + fd_set rfds = devices.infds; + unsigned long args[] = { LHREQ_BREAK, 1 }; + unsigned int maxfd = devices.max_infd; + + /* We also listen to the pipe from the Launcher. */ + FD_SET(waker_fds.pipe[0], &rfds); + if (waker_fds.pipe[0] > maxfd) + maxfd = waker_fds.pipe[0]; + + /* Wait until input is ready from one of the devices. */ + select(maxfd+1, &rfds, NULL, NULL, NULL); + + /* Message from Launcher? */ + if (FD_ISSET(waker_fds.pipe[0], &rfds)) { + char c; + /* If this fails, then assume Launcher has exited. + * Don't do anything on exit: we're just a thread! */ + if (read(waker_fds.pipe[0], &c, 1) != 1) + _exit(0); + continue; + } + + /* Send LHREQ_BREAK command to snap the Launcher out of it. */ + pwrite(waker_fds.lguest_fd, args, sizeof(args), cpu_id); + } + return 0; +} + +/* This routine just sets up a pipe to the Waker process. */ +static void setup_waker(int lguest_fd) +{ + /* This pipe is closed when Launcher dies, telling Waker. */ + if (pipe(waker_fds.pipe) != 0) + err(1, "Creating pipe for Waker"); + + /* Waker also needs to know the lguest fd */ + waker_fds.lguest_fd = lguest_fd; + + if (clone(waker, malloc(4096) + 4096, CLONE_VM | SIGCHLD, NULL) == -1) + err(1, "Creating Waker"); +} + /* * Device Handling. * @@ -536,90 +623,49 @@ static void *_check_pointer(unsigned long addr, unsigned int size, /* Each buffer in the virtqueues is actually a chain of descriptors. This * function returns the next descriptor in the chain, or vq->vring.num if we're * at the end. */ -static unsigned next_desc(struct vring_desc *desc, - unsigned int i, unsigned int max) +static unsigned next_desc(struct virtqueue *vq, unsigned int i) { unsigned int next; /* If this descriptor says it doesn't chain, we're done. */ - if (!(desc[i].flags & VRING_DESC_F_NEXT)) - return max; + if (!(vq->vring.desc[i].flags & VRING_DESC_F_NEXT)) + return vq->vring.num; /* Check they're not leading us off end of descriptors. */ - next = desc[i].next; + next = vq->vring.desc[i].next; /* Make sure compiler knows to grab that: we don't want it changing! */ wmb(); - if (next >= max) + if (next >= vq->vring.num) errx(1, "Desc next is %u", next); return next; } -/* This actually sends the interrupt for this virtqueue */ -static void trigger_irq(struct virtqueue *vq) -{ - unsigned long buf[] = { LHREQ_IRQ, vq->config.irq }; - - /* Don't inform them if nothing used. */ - if (!vq->pending_used) - return; - vq->pending_used = 0; - - /* If they don't want an interrupt, don't send one, unless empty. */ - if ((vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) - && lg_last_avail(vq) != vq->vring.avail->idx) - return; - - /* Send the Guest an interrupt tell them we used something up. */ - if (write(lguest_fd, buf, sizeof(buf)) != 0) - err(1, "Triggering irq %i", vq->config.irq); -} - /* This looks in the virtqueue and for the first available buffer, and converts * it to an iovec for convenient access. 
Since descriptors consist of some * number of output then some number of input descriptors, it's actually two * iovecs, but we pack them into one and note how many of each there were. * - * This function returns the descriptor number found. */ -static unsigned wait_for_vq_desc(struct virtqueue *vq, - struct iovec iov[], - unsigned int *out_num, unsigned int *in_num) + * This function returns the descriptor number found, or vq->vring.num (which + * is never a valid descriptor number) if none was found. */ +static unsigned get_vq_desc(struct virtqueue *vq, + struct iovec iov[], + unsigned int *out_num, unsigned int *in_num) { - unsigned int i, head, max; - struct vring_desc *desc; - u16 last_avail = lg_last_avail(vq); - - while (last_avail == vq->vring.avail->idx) { - u64 event; - - /* OK, tell Guest about progress up to now. */ - trigger_irq(vq); - - /* OK, now we need to know about added descriptors. */ - vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY; - - /* They could have slipped one in as we were doing that: make - * sure it's written, then check again. */ - mb(); - if (last_avail != vq->vring.avail->idx) { - vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY; - break; - } - - /* Nothing new? Wait for eventfd to tell us they refilled. */ - if (read(vq->eventfd, &event, sizeof(event)) != sizeof(event)) - errx(1, "Event read failed?"); - - /* We don't need to be notified again. */ - vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY; - } + unsigned int i, head; + u16 last_avail; /* Check it isn't doing very strange things with descriptor numbers. */ + last_avail = lg_last_avail(vq); if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num) errx(1, "Guest moved used index from %u to %u", last_avail, vq->vring.avail->idx); + /* If there's nothing new since last we looked, return invalid. */ + if (vq->vring.avail->idx == last_avail) + return vq->vring.num; + /* Grab the next descriptor number they're advertising, and increment * the index we've seen. */ head = vq->vring.avail->ring[last_avail % vq->vring.num]; @@ -632,28 +678,15 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, /* When we start there are none of either input nor output. */ *out_num = *in_num = 0; - max = vq->vring.num; - desc = vq->vring.desc; i = head; - - /* If this is an indirect entry, then this buffer contains a descriptor - * table which we handle as if it's any normal descriptor chain. */ - if (desc[i].flags & VRING_DESC_F_INDIRECT) { - if (desc[i].len % sizeof(struct vring_desc)) - errx(1, "Invalid size for indirect buffer table"); - - max = desc[i].len / sizeof(struct vring_desc); - desc = check_pointer(desc[i].addr, desc[i].len); - i = 0; - } - do { /* Grab the first descriptor, and check it's OK. */ - iov[*out_num + *in_num].iov_len = desc[i].len; + iov[*out_num + *in_num].iov_len = vq->vring.desc[i].len; iov[*out_num + *in_num].iov_base - = check_pointer(desc[i].addr, desc[i].len); + = check_pointer(vq->vring.desc[i].addr, + vq->vring.desc[i].len); /* If this is an input descriptor, increment that count. */ - if (desc[i].flags & VRING_DESC_F_WRITE) + if (vq->vring.desc[i].flags & VRING_DESC_F_WRITE) (*in_num)++; else { /* If it's an output descriptor, they're all supposed @@ -664,10 +697,11 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, } /* If we've got too many, that implies a descriptor loop. 
*/ - if (*out_num + *in_num > max) + if (*out_num + *in_num > vq->vring.num) errx(1, "Looped descriptor"); - } while ((i = next_desc(desc, i, max)) != max); + } while ((i = next_desc(vq, i)) != vq->vring.num); + vq->inflight++; return head; } @@ -685,20 +719,44 @@ static void add_used(struct virtqueue *vq, unsigned int head, int len) /* Make sure buffer is written before we update index. */ wmb(); vq->vring.used->idx++; - vq->pending_used++; + vq->inflight--; +} + +/* This actually sends the interrupt for this virtqueue */ +static void trigger_irq(int fd, struct virtqueue *vq) +{ + unsigned long buf[] = { LHREQ_IRQ, vq->config.irq }; + + /* If they don't want an interrupt, don't send one, unless empty. */ + if ((vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) + && vq->inflight) + return; + + /* Send the Guest an interrupt tell them we used something up. */ + if (write(fd, buf, sizeof(buf)) != 0) + err(1, "Triggering irq %i", vq->config.irq); } /* And here's the combo meal deal. Supersize me! */ -static void add_used_and_trigger(struct virtqueue *vq, unsigned head, int len) +static void add_used_and_trigger(int fd, struct virtqueue *vq, + unsigned int head, int len) { add_used(vq, head, len); - trigger_irq(vq); + trigger_irq(fd, vq); } /* * The Console * - * We associate some data with the console for our exit hack. */ + * Here is the input terminal setting we save, and the routine to restore them + * on exit so the user gets their terminal back. */ +static struct termios orig_term; +static void restore_term(void) +{ + tcsetattr(STDIN_FILENO, TCSANOW, &orig_term); +} + +/* We associate some data with the console for our exit hack. */ struct console_abort { /* How many times have they hit ^C? */ @@ -708,275 +766,276 @@ struct console_abort }; /* This is the routine which handles console input (ie. stdin). */ -static void console_input(struct virtqueue *vq) +static bool handle_console_input(int fd, struct device *dev) { int len; unsigned int head, in_num, out_num; - struct console_abort *abort = vq->dev->priv; - struct iovec iov[vq->vring.num]; + struct iovec iov[dev->vq->vring.num]; + struct console_abort *abort = dev->priv; + + /* First we need a console buffer from the Guests's input virtqueue. */ + head = get_vq_desc(dev->vq, iov, &out_num, &in_num); + + /* If they're not ready for input, stop listening to this file + * descriptor. We'll start again once they add an input buffer. */ + if (head == dev->vq->vring.num) + return false; - /* Make sure there's a descriptor waiting. */ - head = wait_for_vq_desc(vq, iov, &out_num, &in_num); if (out_num) errx(1, "Output buffers in console in queue?"); - /* Read it in. */ - len = readv(STDIN_FILENO, iov, in_num); + /* This is why we convert to iovecs: the readv() call uses them, and so + * it reads straight into the Guest's buffer. */ + len = readv(dev->fd, iov, in_num); if (len <= 0) { - /* Ran out of input? */ + /* This implies that the console is closed, is /dev/null, or + * something went terribly wrong. */ warnx("Failed to get console input, ignoring console."); - /* For simplicity, dying threads kill the whole Launcher. So - * just nap here. */ - for (;;) - pause(); + /* Put the input terminal back. */ + restore_term(); + /* Remove callback from input vq, so it doesn't restart us. */ + dev->vq->handle_output = NULL; + /* Stop listening to this fd: don't call us again. */ + return false; } - add_used_and_trigger(vq, head, len); + /* Tell the Guest about the new input. 
*/ + add_used_and_trigger(fd, dev->vq, head, len); /* Three ^C within one second? Exit. * - * This is such a hack, but works surprisingly well. Each ^C has to - * be in a buffer by itself, so they can't be too fast. But we check - * that we get three within about a second, so they can't be too - * slow. */ - if (len != 1 || ((char *)iov[0].iov_base)[0] != 3) { + * This is such a hack, but works surprisingly well. Each ^C has to be + * in a buffer by itself, so they can't be too fast. But we check that + * we get three within about a second, so they can't be too slow. */ + if (len == 1 && ((char *)iov[0].iov_base)[0] == 3) { + if (!abort->count++) + gettimeofday(&abort->start, NULL); + else if (abort->count == 3) { + struct timeval now; + gettimeofday(&now, NULL); + if (now.tv_sec <= abort->start.tv_sec+1) { + unsigned long args[] = { LHREQ_BREAK, 0 }; + /* Close the fd so Waker will know it has to + * exit. */ + close(waker_fds.pipe[1]); + /* Just in case Waker is blocked in BREAK, send + * unbreak now. */ + write(fd, args, sizeof(args)); + exit(2); + } + abort->count = 0; + } + } else + /* Any other key resets the abort counter. */ abort->count = 0; - return; - } - abort->count++; - if (abort->count == 1) - gettimeofday(&abort->start, NULL); - else if (abort->count == 3) { - struct timeval now; - gettimeofday(&now, NULL); - /* Kill all Launcher processes with SIGINT, like normal ^C */ - if (now.tv_sec <= abort->start.tv_sec+1) - kill(0, SIGINT); - abort->count = 0; - } + /* Everything went OK! */ + return true; } -/* This is the routine which handles console output (ie. stdout). */ -static void console_output(struct virtqueue *vq) +/* Handling output for console is simple: we just get all the output buffers + * and write them to stdout. */ +static void handle_console_output(int fd, struct virtqueue *vq, bool timeout) { unsigned int head, out, in; + int len; struct iovec iov[vq->vring.num]; - head = wait_for_vq_desc(vq, iov, &out, &in); - if (in) - errx(1, "Input buffers in console output queue?"); - while (!iov_empty(iov, out)) { - int len = writev(STDOUT_FILENO, iov, out); - if (len <= 0) - err(1, "Write to stdout gave %i", len); - iov_consume(iov, out, len); + /* Keep getting output buffers from the Guest until we run out. */ + while ((head = get_vq_desc(vq, iov, &out, &in)) != vq->vring.num) { + if (in) + errx(1, "Input buffers in output queue?"); + len = writev(STDOUT_FILENO, iov, out); + add_used_and_trigger(fd, vq, head, len); } - add_used(vq, head, 0); +} + +/* This is called when we no longer want to hear about Guest changes to a + * virtqueue. This is more efficient in high-traffic cases, but it means we + * have to set a timer to check if any more changes have occurred. */ +static void block_vq(struct virtqueue *vq) +{ + struct itimerval itm; + + vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY; + vq->blocked = true; + + itm.it_interval.tv_sec = 0; + itm.it_interval.tv_usec = 0; + itm.it_value.tv_sec = 0; + itm.it_value.tv_usec = timeout_usec; + + setitimer(ITIMER_REAL, &itm, NULL); } /* * The Network * * Handling output for network is also simple: we get all the output buffers - * and write them to /dev/net/tun. + * and write them (ignoring the first element) to this device's file descriptor + * (/dev/net/tun). 
*/ -struct net_info { - int tunfd; -}; - -static void net_output(struct virtqueue *vq) +static void handle_net_output(int fd, struct virtqueue *vq, bool timeout) { - struct net_info *net_info = vq->dev->priv; - unsigned int head, out, in; + unsigned int head, out, in, num = 0; + int len; struct iovec iov[vq->vring.num]; + static int last_timeout_num; + + /* Keep getting output buffers from the Guest until we run out. */ + while ((head = get_vq_desc(vq, iov, &out, &in)) != vq->vring.num) { + if (in) + errx(1, "Input buffers in output queue?"); + len = writev(vq->dev->fd, iov, out); + if (len < 0) + err(1, "Writing network packet to tun"); + add_used_and_trigger(fd, vq, head, len); + num++; + } - head = wait_for_vq_desc(vq, iov, &out, &in); - if (in) - errx(1, "Input buffers in net output queue?"); - if (writev(net_info->tunfd, iov, out) < 0) - errx(1, "Write to tun failed?"); - add_used(vq, head, 0); -} - -/* Will reading from this file descriptor block? */ -static bool will_block(int fd) -{ - fd_set fdset; - struct timeval zero = { 0, 0 }; - FD_ZERO(&fdset); - FD_SET(fd, &fdset); - return select(fd+1, &fdset, NULL, NULL, &zero) != 1; + /* Block further kicks and set up a timer if we saw anything. */ + if (!timeout && num) + block_vq(vq); + + /* We never quite know how long should we wait before we check the + * queue again for more packets. We start at 500 microseconds, and if + * we get fewer packets than last time, we assume we made the timeout + * too small and increase it by 10 microseconds. Otherwise, we drop it + * by one microsecond every time. It seems to work well enough. */ + if (timeout) { + if (num < last_timeout_num) + timeout_usec += 10; + else if (timeout_usec > 1) + timeout_usec--; + last_timeout_num = num; + } } -/* This is where we handle packets coming in from the tun device to our +/* This is where we handle a packet coming in from the tun device to our * Guest. */ -static void net_input(struct virtqueue *vq) +static bool handle_tun_input(int fd, struct device *dev) { + unsigned int head, in_num, out_num; int len; - unsigned int head, out, in; - struct iovec iov[vq->vring.num]; - struct net_info *net_info = vq->dev->priv; - - head = wait_for_vq_desc(vq, iov, &out, &in); - if (out) - errx(1, "Output buffers in net input queue?"); - - /* Deliver interrupt now, since we're about to sleep. */ - if (vq->pending_used && will_block(net_info->tunfd)) - trigger_irq(vq); - - len = readv(net_info->tunfd, iov, in); + struct iovec iov[dev->vq->vring.num]; + + /* First we need a network buffer from the Guests's recv virtqueue. */ + head = get_vq_desc(dev->vq, iov, &out_num, &in_num); + if (head == dev->vq->vring.num) { + /* Now, it's expected that if we try to send a packet too + * early, the Guest won't be ready yet. Wait until the device + * status says it's ready. */ + /* FIXME: Actually want DRIVER_ACTIVE here. */ + + /* Now tell it we want to know if new things appear. */ + dev->vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY; + wmb(); + + /* We'll turn this back on if input buffers are registered. */ + return false; + } else if (out_num) + errx(1, "Output buffers in network recv queue?"); + + /* Read the packet from the device directly into the Guest's buffer. */ + len = readv(dev->fd, iov, in_num); if (len <= 0) - err(1, "Failed to read from tun."); - add_used(vq, head, len); -} + err(1, "reading network"); -/* This is the helper to create threads. */ -static int do_thread(void *_vq) -{ - struct virtqueue *vq = _vq; + /* Tell the Guest about the new packet. 
*/ + add_used_and_trigger(fd, dev->vq, head, len); - for (;;) - vq->service(vq); - return 0; -} + verbose("tun input packet len %i [%02x %02x] (%s)\n", len, + ((u8 *)iov[1].iov_base)[0], ((u8 *)iov[1].iov_base)[1], + head != dev->vq->vring.num ? "sent" : "discarded"); -/* When a child dies, we kill our entire process group with SIGTERM. This - * also has the side effect that the shell restores the console for us! */ -static void kill_launcher(int signal) -{ - kill(0, SIGTERM); + /* All good. */ + return true; } -static void reset_device(struct device *dev) +/*L:215 This is the callback attached to the network and console input + * virtqueues: it ensures we try again, in case we stopped console or net + * delivery because Guest didn't have any buffers. */ +static void enable_fd(int fd, struct virtqueue *vq, bool timeout) { - struct virtqueue *vq; - - verbose("Resetting device %s\n", dev->name); - - /* Clear any features they've acked. */ - memset(get_feature_bits(dev) + dev->feature_len, 0, dev->feature_len); - - /* We're going to be explicitly killing threads, so ignore them. */ - signal(SIGCHLD, SIG_IGN); - - /* Zero out the virtqueues, get rid of their threads */ - for (vq = dev->vq; vq; vq = vq->next) { - if (vq->thread != (pid_t)-1) { - kill(vq->thread, SIGTERM); - waitpid(vq->thread, NULL, 0); - vq->thread = (pid_t)-1; - } - memset(vq->vring.desc, 0, - vring_size(vq->config.num, LGUEST_VRING_ALIGN)); - lg_last_avail(vq) = 0; - } - dev->running = false; - - /* Now we care if threads die. */ - signal(SIGCHLD, (void *)kill_launcher); + add_device_fd(vq->dev->fd); + /* Snap the Waker out of its select loop. */ + write(waker_fds.pipe[1], "", 1); } -static void create_thread(struct virtqueue *vq) +static void net_enable_fd(int fd, struct virtqueue *vq, bool timeout) { - /* Create stack for thread and run it. Since stack grows - * upwards, we point the stack pointer to the end of this - * region. */ - char *stack = malloc(32768); - unsigned long args[] = { LHREQ_EVENTFD, - vq->config.pfn*getpagesize(), 0 }; - - /* Create a zero-initialized eventfd. */ - vq->eventfd = eventfd(0, 0); - if (vq->eventfd < 0) - err(1, "Creating eventfd"); - args[2] = vq->eventfd; - - /* Attach an eventfd to this virtqueue: it will go off - * when the Guest does an LHCALL_NOTIFY for this vq. */ - if (write(lguest_fd, &args, sizeof(args)) != 0) - err(1, "Attaching eventfd"); - - /* CLONE_VM: because it has to access the Guest memory, and - * SIGCHLD so we get a signal if it dies. */ - vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq); - if (vq->thread == (pid_t)-1) - err(1, "Creating clone"); - /* We close our local copy, now the child has it. */ - close(vq->eventfd); + /* We don't need to know again when Guest refills receive buffer. */ + vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY; + enable_fd(fd, vq, timeout); } -static void start_device(struct device *dev) +/* When the Guest tells us they updated the status field, we handle it. */ +static void update_device_status(struct device *dev) { - unsigned int i; struct virtqueue *vq; - verbose("Device %s OK: offered", dev->name); - for (i = 0; i < dev->feature_len; i++) - verbose(" %02x", get_feature_bits(dev)[i]); - verbose(", accepted"); - for (i = 0; i < dev->feature_len; i++) - verbose(" %02x", get_feature_bits(dev) - [dev->feature_len+i]); - - for (vq = dev->vq; vq; vq = vq->next) { - if (vq->service) - create_thread(vq); - } - dev->running = true; -} + /* This is a reset. 
*/ + if (dev->desc->status == 0) { + verbose("Resetting device %s\n", dev->name); -static void cleanup_devices(void) -{ - struct device *dev; - - for (dev = devices.dev; dev; dev = dev->next) - reset_device(dev); - - /* If we saved off the original terminal settings, restore them now. */ - if (orig_term.c_lflag & (ISIG|ICANON|ECHO)) - tcsetattr(STDIN_FILENO, TCSANOW, &orig_term); -} + /* Clear any features they've acked. */ + memset(get_feature_bits(dev) + dev->desc->feature_len, 0, + dev->desc->feature_len); -/* When the Guest tells us they updated the status field, we handle it. */ -static void update_device_status(struct device *dev) -{ - /* A zero status is a reset, otherwise it's a set of flags. */ - if (dev->desc->status == 0) - reset_device(dev); - else if (dev->desc->status & VIRTIO_CONFIG_S_FAILED) { + /* Zero out the virtqueues. */ + for (vq = dev->vq; vq; vq = vq->next) { + memset(vq->vring.desc, 0, + vring_size(vq->config.num, LGUEST_VRING_ALIGN)); + lg_last_avail(vq) = 0; + } + } else if (dev->desc->status & VIRTIO_CONFIG_S_FAILED) { warnx("Device %s configuration FAILED", dev->name); - if (dev->running) - reset_device(dev); } else if (dev->desc->status & VIRTIO_CONFIG_S_DRIVER_OK) { - if (!dev->running) - start_device(dev); + unsigned int i; + + verbose("Device %s OK: offered", dev->name); + for (i = 0; i < dev->desc->feature_len; i++) + verbose(" %02x", get_feature_bits(dev)[i]); + verbose(", accepted"); + for (i = 0; i < dev->desc->feature_len; i++) + verbose(" %02x", get_feature_bits(dev) + [dev->desc->feature_len+i]); + + if (dev->ready) + dev->ready(dev); } } /* This is the generic routine we call when the Guest uses LHCALL_NOTIFY. */ -static void handle_output(unsigned long addr) +static void handle_output(int fd, unsigned long addr) { struct device *i; + struct virtqueue *vq; - /* Check each device. */ + /* Check each device and virtqueue. */ for (i = devices.dev; i; i = i->next) { - struct virtqueue *vq; - /* Notifications to device descriptors update device status. */ if (from_guest_phys(addr) == i->desc) { update_device_status(i); return; } - /* Devices *can* be used before status is set to DRIVER_OK. */ + /* Notifications to virtqueues mean output has occurred. */ for (vq = i->vq; vq; vq = vq->next) { - if (addr != vq->config.pfn*getpagesize()) + if (vq->config.pfn != addr/getpagesize()) continue; - if (i->running) - errx(1, "Notification on running %s", i->name); - start_device(i); + + /* Guest should acknowledge (and set features!) before + * using the device. */ + if (i->desc->status == 0) { + warnx("%s gave early output", i->name); + return; + } + + if (strcmp(vq->dev->name, "console") != 0) + verbose("Output to %s\n", vq->dev->name); + if (vq->handle_output) + vq->handle_output(fd, vq, false); return; } } @@ -990,6 +1049,71 @@ static void handle_output(unsigned long addr) strnlen(from_guest_phys(addr), guest_limit - addr)); } +static void handle_timeout(int fd) +{ + char buf[32]; + struct device *i; + struct virtqueue *vq; + + /* Clear the pipe */ + read(timeoutpipe[0], buf, sizeof(buf)); + + /* Check each device and virtqueue: flush blocked ones. */ + for (i = devices.dev; i; i = i->next) { + for (vq = i->vq; vq; vq = vq->next) { + if (!vq->blocked) + continue; + + vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY; + vq->blocked = false; + if (vq->handle_output) + vq->handle_output(fd, vq, true); + } + } +} + +/* This is called when the Waker wakes us up: check for incoming file + * descriptors. 
*/ +static void handle_input(int fd) +{ + /* select() wants a zeroed timeval to mean "don't wait". */ + struct timeval poll = { .tv_sec = 0, .tv_usec = 0 }; + + for (;;) { + struct device *i; + fd_set fds = devices.infds; + int num; + + num = select(devices.max_infd+1, &fds, NULL, NULL, &poll); + /* Could get interrupted */ + if (num < 0) + continue; + /* If nothing is ready, we're done. */ + if (num == 0) + break; + + /* Otherwise, call the device(s) which have readable file + * descriptors and a method of handling them. */ + for (i = devices.dev; i; i = i->next) { + if (i->handle_input && FD_ISSET(i->fd, &fds)) { + if (i->handle_input(fd, i)) + continue; + + /* If handle_input() returns false, it means we + * should no longer service it. Networking and + * console do this when there's no input + * buffers to deliver into. Console also uses + * it when it discovers that stdin is closed. */ + FD_CLR(i->fd, &devices.infds); + } + } + + /* Is this the timeout fd? */ + if (FD_ISSET(timeoutpipe[0], &fds)) + handle_timeout(fd); + } +} + /*L:190 * Device Setup * @@ -1005,8 +1129,8 @@ static void handle_output(unsigned long addr) static u8 *device_config(const struct device *dev) { return (void *)(dev->desc + 1) - + dev->num_vq * sizeof(struct lguest_vqconfig) - + dev->feature_len * 2; + + dev->desc->num_vq * sizeof(struct lguest_vqconfig) + + dev->desc->feature_len * 2; } /* This routine allocates a new "struct lguest_device_desc" from descriptor @@ -1035,7 +1159,7 @@ static struct lguest_device_desc *new_dev_desc(u16 type) /* Each device descriptor is followed by the description of its virtqueues. We * specify how many descriptors the virtqueue is to have. */ static void add_virtqueue(struct device *dev, unsigned int num_descs, - void (*service)(struct virtqueue *)) + void (*handle_output)(int, struct virtqueue *, bool)) { unsigned int pages; struct virtqueue **i, *vq = malloc(sizeof(*vq)); @@ -1050,8 +1174,8 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, vq->next = NULL; vq->last_avail_idx = 0; vq->dev = dev; - vq->service = service; - vq->thread = (pid_t)-1; + vq->inflight = 0; + vq->blocked = false; /* Initialize the configuration. */ vq->config.num = num_descs; @@ -1067,7 +1191,6 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, * yet, otherwise we'd be overwriting them. */ assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0); memcpy(device_config(dev), &vq->config, sizeof(vq->config)); - dev->num_vq++; dev->desc->num_vq++; verbose("Virtqueue page %#lx\n", to_guest_phys(p)); @@ -1076,6 +1199,15 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, * second. */ for (i = &dev->vq; *i; i = &(*i)->next); *i = vq; + + /* Set the routine to call when the Guest does something to this + * virtqueue. */ + vq->handle_output = handle_output; + + /* As an optimization, set the advisory "Don't Notify Me" flag if we + * don't have a handler */ + if (!handle_output) + vq->vring.used->flags = VRING_USED_F_NO_NOTIFY; } /* The first half of the feature bitmask is for us to advertise features. 
The @@ -1087,7 +1219,7 @@ static void add_feature(struct device *dev, unsigned bit) /* We can't extend the feature bits once we've added config bytes */ if (dev->desc->feature_len <= bit / CHAR_BIT) { assert(dev->desc->config_len == 0); - dev->feature_len = dev->desc->feature_len = (bit/CHAR_BIT) + 1; + dev->desc->feature_len = (bit / CHAR_BIT) + 1; } features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT)); @@ -1111,17 +1243,22 @@ static void set_config(struct device *dev, unsigned len, const void *conf) * calling new_dev_desc() to allocate the descriptor and device memory. * * See what I mean about userspace being boring? */ -static struct device *new_device(const char *name, u16 type) +static struct device *new_device(const char *name, u16 type, int fd, + bool (*handle_input)(int, struct device *)) { struct device *dev = malloc(sizeof(*dev)); /* Now we populate the fields one at a time. */ + dev->fd = fd; + /* If we have an input handler for this file descriptor, then we add it + * to the device_list's fdset and maxfd. */ + if (handle_input) + add_device_fd(dev->fd); dev->desc = new_dev_desc(type); + dev->handle_input = handle_input; dev->name = name; dev->vq = NULL; - dev->feature_len = 0; - dev->num_vq = 0; - dev->running = false; + dev->ready = NULL; /* Append to device list. Prepending to a single-linked list is * easier, but the user expects the devices to be arranged on the bus @@ -1149,10 +1286,13 @@ static void setup_console(void) * raw input stream to the Guest. */ term.c_lflag &= ~(ISIG|ICANON|ECHO); tcsetattr(STDIN_FILENO, TCSANOW, &term); + /* If we exit gracefully, the original settings will be + * restored so the user can see what they're typing. */ + atexit(restore_term); } - dev = new_device("console", VIRTIO_ID_CONSOLE); - + dev = new_device("console", VIRTIO_ID_CONSOLE, + STDIN_FILENO, handle_console_input); /* We store the console state in dev->priv, and initialize it. */ dev->priv = malloc(sizeof(struct console_abort)); ((struct console_abort *)dev->priv)->count = 0; @@ -1161,13 +1301,31 @@ static void setup_console(void) * they put something the input queue, we make sure we're listening to * stdin. When they put something in the output queue, we write it to * stdout. */ - add_virtqueue(dev, VIRTQUEUE_NUM, console_input); - add_virtqueue(dev, VIRTQUEUE_NUM, console_output); + add_virtqueue(dev, VIRTQUEUE_NUM, enable_fd); + add_virtqueue(dev, VIRTQUEUE_NUM, handle_console_output); - verbose("device %u: console\n", ++devices.device_num); + verbose("device %u: console\n", devices.device_num++); } /*:*/ +static void timeout_alarm(int sig) +{ + write(timeoutpipe[1], "", 1); +} + +static void setup_timeout(void) +{ + if (pipe(timeoutpipe) != 0) + err(1, "Creating timeout pipe"); + + if (fcntl(timeoutpipe[1], F_SETFL, + fcntl(timeoutpipe[1], F_GETFL) | O_NONBLOCK) != 0) + err(1, "Making timeout pipe nonblocking"); + + add_device_fd(timeoutpipe[0]); + signal(SIGALRM, timeout_alarm); +} + /*M:010 Inter-guest networking is an interesting area. Simplest is to have a * --sharenet= option which opens or creates a named pipe. This can be * used to send packets to another guest in a 1:1 manner. 
@@ -1289,23 +1447,21 @@ static int get_tun_device(char tapif[IFNAMSIZ]) static void setup_tun_net(char *arg) { struct device *dev; - struct net_info *net_info = malloc(sizeof(*net_info)); - int ipfd; + int netfd, ipfd; u32 ip = INADDR_ANY; bool bridging = false; char tapif[IFNAMSIZ], *p; struct virtio_net_config conf; - net_info->tunfd = get_tun_device(tapif); + netfd = get_tun_device(tapif); /* First we create a new network device. */ - dev = new_device("net", VIRTIO_ID_NET); - dev->priv = net_info; + dev = new_device("net", VIRTIO_ID_NET, netfd, handle_tun_input); /* Network devices need a receive and a send queue, just like * console. */ - add_virtqueue(dev, VIRTQUEUE_NUM, net_input); - add_virtqueue(dev, VIRTQUEUE_NUM, net_output); + add_virtqueue(dev, VIRTQUEUE_NUM, net_enable_fd); + add_virtqueue(dev, VIRTQUEUE_NUM, handle_net_output); /* We need a socket to perform the magic network ioctls to bring up the * tap interface, connect to the bridge etc. Any socket will do! */ @@ -1346,8 +1502,6 @@ static void setup_tun_net(char *arg) add_feature(dev, VIRTIO_NET_F_HOST_TSO4); add_feature(dev, VIRTIO_NET_F_HOST_TSO6); add_feature(dev, VIRTIO_NET_F_HOST_ECN); - /* We handle indirect ring entries */ - add_feature(dev, VIRTIO_RING_F_INDIRECT_DESC); set_config(dev, sizeof(conf), &conf); /* We don't need the socket any more; setup is done. */ @@ -1396,18 +1550,20 @@ struct vblk_info * Remember that the block device is handled by a separate I/O thread. We head * straight into the core of that thread here: */ -static void blk_request(struct virtqueue *vq) +static bool service_io(struct device *dev) { - struct vblk_info *vblk = vq->dev->priv; + struct vblk_info *vblk = dev->priv; unsigned int head, out_num, in_num, wlen; int ret; u8 *in; struct virtio_blk_outhdr *out; - struct iovec iov[vq->vring.num]; + struct iovec iov[dev->vq->vring.num]; off64_t off; - /* Get the next request. */ - head = wait_for_vq_desc(vq, iov, &out_num, &in_num); + /* See if there's a request waiting. If not, nothing to do. */ + head = get_vq_desc(dev->vq, iov, &out_num, &in_num); + if (head == dev->vq->vring.num) + return false; /* Every block request should contain at least one output buffer * (detailing the location on disk and the type of request) and one @@ -1481,21 +1637,83 @@ static void blk_request(struct virtqueue *vq) if (out->type & VIRTIO_BLK_T_BARRIER) fdatasync(vblk->fd); - add_used(vq, head, wlen); + /* We can't trigger an IRQ, because we're not the Launcher. It does + * that when we tell it we're done. */ + add_used(dev->vq, head, wlen); + return true; +} + +/* This is the thread which actually services the I/O. */ +static int io_thread(void *_dev) +{ + struct device *dev = _dev; + struct vblk_info *vblk = dev->priv; + char c; + + /* Close other side of workpipe so we get 0 read when main dies. */ + close(vblk->workpipe[1]); + /* Close the other side of the done_fd pipe. */ + close(dev->fd); + + /* When this read fails, it means Launcher died, so we follow. */ + while (read(vblk->workpipe[0], &c, 1) == 1) { + /* We acknowledge each request immediately to reduce latency, + * rather than waiting until we've done them all. I haven't + * measured to see if it makes any difference. + * + * That would be an interesting test, wouldn't it? You could + * also try having more than one I/O thread. */ + while (service_io(dev)) + write(vblk->done_fd, &c, 1); + } + return 0; +} + +/* Now we've seen the I/O thread, we return to the Launcher to see what happens + * when that thread tells us it's completed some I/O. 
*/ +static bool handle_io_finish(int fd, struct device *dev) +{ + char c; + + /* If the I/O thread died, presumably it printed the error, so we + * simply exit. */ + if (read(dev->fd, &c, 1) != 1) + exit(1); + + /* It did some work, so trigger the irq. */ + trigger_irq(fd, dev->vq); + return true; +} + +/* When the Guest submits some I/O, we just need to wake the I/O thread. */ +static void handle_virtblk_output(int fd, struct virtqueue *vq, bool timeout) +{ + struct vblk_info *vblk = vq->dev->priv; + char c = 0; + + /* Wake up I/O thread and tell it to go to work! */ + if (write(vblk->workpipe[1], &c, 1) != 1) + /* Presumably it indicated why it died. */ + exit(1); } /*L:198 This actually sets up a virtual block device. */ static void setup_block_file(const char *filename) { + int p[2]; struct device *dev; struct vblk_info *vblk; + void *stack; struct virtio_blk_config conf; + /* This is the pipe the I/O thread will use to tell us I/O is done. */ + pipe(p); + /* The device responds to return from I/O thread. */ - dev = new_device("block", VIRTIO_ID_BLOCK); + dev = new_device("block", VIRTIO_ID_BLOCK, p[0], handle_io_finish); /* The device has one virtqueue, where the Guest places requests. */ - add_virtqueue(dev, VIRTQUEUE_NUM, blk_request); + add_virtqueue(dev, VIRTQUEUE_NUM, handle_virtblk_output); /* Allocate the room for our own bookkeeping */ vblk = dev->priv = malloc(sizeof(*vblk)); @@ -1517,29 +1735,49 @@ static void setup_block_file(const char *filename) set_config(dev, sizeof(conf), &conf); + /* The I/O thread writes to this end of the pipe when done. */ + vblk->done_fd = p[1]; + + /* This is the second pipe, which is how we tell the I/O thread about + * more work. */ + pipe(vblk->workpipe); + + /* Create stack for thread and run it. Since stack grows upwards, we + * point the stack pointer to the end of this region. */ + stack = malloc(32768); + /* SIGCHLD - We dont "wait" for our cloned thread, so prevent it from + * becoming a zombie. */ + if (clone(io_thread, stack + 32768, CLONE_VM | SIGCHLD, dev) == -1) + err(1, "Creating clone"); + + /* We don't need to keep the I/O thread's end of the pipes open. */ + close(vblk->done_fd); + close(vblk->workpipe[0]); + verbose("device %u: virtblock %llu sectors\n", - ++devices.device_num, le64_to_cpu(conf.capacity)); + devices.device_num, le64_to_cpu(conf.capacity)); } -struct rng_info { - int rfd; -}; - /* Our random number generator device reads from /dev/random into the Guest's * input buffers. The usual case is that the Guest doesn't want random numbers * and so has no buffers although /dev/random is still readable, whereas * console is the reverse. * * The same logic applies, however. */ -static void rng_input(struct virtqueue *vq) +static bool handle_rng_input(int fd, struct device *dev) { int len; unsigned int head, in_num, out_num, totlen = 0; - struct rng_info *rng_info = vq->dev->priv; - struct iovec iov[vq->vring.num]; + struct iovec iov[dev->vq->vring.num]; /* First we need a buffer from the Guests's virtqueue. */ - head = wait_for_vq_desc(vq, iov, &out_num, &in_num); + head = get_vq_desc(dev->vq, iov, &out_num, &in_num); + + /* If they're not ready for input, stop listening to this file + * descriptor. We'll start again once they add an input buffer. */ + if (head == dev->vq->vring.num) + return false; + if (out_num) errx(1, "Output buffers in rng?"); @@ -1547,7 +1785,7 @@ static void rng_input(struct virtqueue *vq) * it reads straight into the Guest's buffer. We loop to make sure we * fill it. 
*/ while (!iov_empty(iov, in_num)) { - len = readv(rng_info->rfd, iov, in_num); + len = readv(dev->fd, iov, in_num); if (len <= 0) err(1, "Read from /dev/random gave %i", len); iov_consume(iov, in_num, len); @@ -1555,23 +1793,25 @@ static void rng_input(struct virtqueue *vq) } /* Tell the Guest about the new input. */ - add_used(vq, head, totlen); + add_used_and_trigger(fd, dev->vq, head, totlen); + + /* Everything went OK! */ + return true; } /* And this creates a "hardware" random number device for the Guest. */ static void setup_rng(void) { struct device *dev; - struct rng_info *rng_info = malloc(sizeof(*rng_info)); + int fd; - rng_info->rfd = open_or_die("/dev/random", O_RDONLY); + fd = open_or_die("/dev/random", O_RDONLY); /* The device responds to return from I/O thread. */ - dev = new_device("rng", VIRTIO_ID_RNG); - dev->priv = rng_info; + dev = new_device("rng", VIRTIO_ID_RNG, fd, handle_rng_input); /* The device has one virtqueue, where the Guest places inbufs. */ - add_virtqueue(dev, VIRTQUEUE_NUM, rng_input); + add_virtqueue(dev, VIRTQUEUE_NUM, enable_fd); verbose("device %u: rng\n", devices.device_num++); } @@ -1587,18 +1827,17 @@ static void __attribute__((noreturn)) restart_guest(void) for (i = 3; i < FD_SETSIZE; i++) close(i); - /* Reset all the devices (kills all threads). */ - cleanup_devices(); - + /* The exec automatically gets rid of the I/O and Waker threads. */ execv(main_args[0], main_args); err(1, "Could not exec %s", main_args[0]); } /*L:220 Finally we reach the core of the Launcher which runs the Guest, serves * its input and output, and finally, lays it to rest. */ -static void __attribute__((noreturn)) run_guest(void) +static void __attribute__((noreturn)) run_guest(int lguest_fd) { for (;;) { + unsigned long args[] = { LHREQ_BREAK, 0 }; unsigned long notify_addr; int readval; @@ -1609,7 +1848,8 @@ static void __attribute__((noreturn)) run_guest(void) /* One unsigned long means the Guest did HCALL_NOTIFY */ if (readval == sizeof(notify_addr)) { verbose("Notify on address %#lx\n", notify_addr); - handle_output(notify_addr); + handle_output(lguest_fd, notify_addr); + continue; /* ENOENT means the Guest died. Reading tells us why. */ } else if (errno == ENOENT) { char reason[1024] = { 0 }; @@ -1618,9 +1858,19 @@ static void __attribute__((noreturn)) run_guest(void) /* ERESTART means that we need to reboot the guest */ } else if (errno == ERESTART) { restart_guest(); - /* Anything else means a bug or incompatible change. */ - } else + /* EAGAIN means a signal (timeout). + * Anything else means a bug or incompatible change. */ + } else if (errno != EAGAIN) err(1, "Running guest failed"); + + /* Only service input on thread for CPU 0. */ + if (cpu_id != 0) + continue; + + /* Service input, then unset the BREAK to release the Waker. */ + handle_input(lguest_fd); + if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0) + err(1, "Resetting break"); } } /*L:240 @@ -1654,8 +1904,8 @@ int main(int argc, char *argv[]) /* Memory, top-level pagetable, code startpoint and size of the * (optional) initrd. */ unsigned long mem = 0, start, initrd_size = 0; - /* Two temporaries. */ - int i, c; + /* Two temporaries and the /dev/lguest file descriptor. */ + int i, c, lguest_fd; /* The boot information for the Guest. */ struct boot_params *boot; /* If they specify an initrd file to load. */ @@ -1663,10 +1913,18 @@ int main(int argc, char *argv[]) /* Save the args: we "reboot" by execing ourselves again. 
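 * (That is the whole restart story: restart_guest() above closes every
 * fd from 3 upwards and calls execv(main_args[0], main_args), so the
 * saved argv really is all the state a "reboot" needs.)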
*/ main_args = argv; + /* We don't "wait" for the children, so prevent them from becoming + * zombies. */ + signal(SIGCHLD, SIG_IGN); - /* First we initialize the device list. We keep a pointer to the last - * device, and the next interrupt number to use for devices (1: - * remember that 0 is used by the timer). */ + /* First we initialize the device list. Since console and network + * device receive input from a file descriptor, we keep an fdset + * (infds) and the maximum fd number (max_infd) with the head of the + * list. We also keep a pointer to the last device. Finally, we keep + * the next interrupt number to use for devices (1: remember that 0 is + * used by the timer). */ + FD_ZERO(&devices.infds); + devices.max_infd = -1; devices.lastdev = NULL; devices.next_irq = 1; @@ -1724,6 +1982,9 @@ int main(int argc, char *argv[]) /* We always have a console device */ setup_console(); + /* We can timeout waiting for Guest network transmit. */ + setup_timeout(); + /* Now we load the kernel */ start = load_kernel(open_or_die(argv[optind+1], O_RDONLY)); @@ -1762,16 +2023,15 @@ int main(int argc, char *argv[]) /* We tell the kernel to initialize the Guest: this returns the open * /dev/lguest file descriptor. */ - tell_kernel(start); - - /* Ensure that we terminate if a child dies. */ - signal(SIGCHLD, kill_launcher); + lguest_fd = tell_kernel(start); - /* If we exit via err(), this kills all the threads, restores tty. */ - atexit(cleanup_devices); + /* We clone off a thread, which wakes the Launcher whenever one of the + * input file descriptors needs attention. We call this the Waker, and + * we'll cover it in a moment. */ + setup_waker(lguest_fd); /* Finally, run the Guest. This doesn't return. */ - run_guest(); + run_guest(lguest_fd); } /*:*/ diff --git a/trunk/Documentation/lguest/lguest.txt b/trunk/Documentation/lguest/lguest.txt index efb3a6a045a2..28c747362f95 100644 --- a/trunk/Documentation/lguest/lguest.txt +++ b/trunk/Documentation/lguest/lguest.txt @@ -37,6 +37,7 @@ Running Lguest: "Paravirtualized guest support" = Y "Lguest guest support" = Y "High Memory Support" = off/4GB + "PAE (Physical Address Extension) Support" = N "Alignment value to which kernel should be aligned" = 0x100000 (CONFIG_PARAVIRT=y, CONFIG_LGUEST_GUEST=y, CONFIG_HIGHMEM64G=n and CONFIG_PHYSICAL_ALIGN=0x100000) diff --git a/trunk/arch/alpha/mm/extable.c b/trunk/arch/alpha/mm/extable.c index 813c9b63c0e1..62dc379d301a 100644 --- a/trunk/arch/alpha/mm/extable.c +++ b/trunk/arch/alpha/mm/extable.c @@ -48,27 +48,6 @@ void sort_extable(struct exception_table_entry *start, cmp_ex, swap_ex); } -#ifdef CONFIG_MODULES -/* - * Any entry referring to the module init will be at the beginning or - * the end. 
- */ -void trim_init_extable(struct module *m) -{ - /*trim the beginning*/ - while (m->num_exentries && - within_module_init(ex_to_addr(&m->extable[0]), m)) { - m->extable++; - m->num_exentries--; - } - /*trim the end*/ - while (m->num_exentries && - within_module_init(ex_to_addr(&m->extable[m->num_exentries-1]), - m)) - m->num_exentries--; -} -#endif /* CONFIG_MODULES */ - const struct exception_table_entry * search_extable(const struct exception_table_entry *first, const struct exception_table_entry *last, diff --git a/trunk/arch/avr32/kernel/module.c b/trunk/arch/avr32/kernel/module.c index 98f94d041d9c..1167fe9cf6c4 100644 --- a/trunk/arch/avr32/kernel/module.c +++ b/trunk/arch/avr32/kernel/module.c @@ -32,6 +32,8 @@ void module_free(struct module *mod, void *module_region) mod->arch.syminfo = NULL; vfree(module_region); + /* FIXME: if module_region == mod->init_region, trim exception + * table entries. */ } static inline int check_rela(Elf32_Rela *rela, struct module *module, diff --git a/trunk/arch/cris/kernel/module.c b/trunk/arch/cris/kernel/module.c index abc13e368b90..a187833febc8 100644 --- a/trunk/arch/cris/kernel/module.c +++ b/trunk/arch/cris/kernel/module.c @@ -48,6 +48,8 @@ void *module_alloc(unsigned long size) void module_free(struct module *mod, void *module_region) { FREE_MODULE(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + table entries. */ } /* We don't need anything special. */ diff --git a/trunk/arch/frv/kernel/module.c b/trunk/arch/frv/kernel/module.c index 711763c8a6f3..850d168f69fc 100644 --- a/trunk/arch/frv/kernel/module.c +++ b/trunk/arch/frv/kernel/module.c @@ -35,6 +35,8 @@ void *module_alloc(unsigned long size) void module_free(struct module *mod, void *module_region) { vfree(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + table entries. */ } /* We don't need anything special. */ diff --git a/trunk/arch/h8300/kernel/module.c b/trunk/arch/h8300/kernel/module.c index 0865e291c20d..cfc9127d2ced 100644 --- a/trunk/arch/h8300/kernel/module.c +++ b/trunk/arch/h8300/kernel/module.c @@ -23,6 +23,8 @@ void *module_alloc(unsigned long size) void module_free(struct module *mod, void *module_region) { vfree(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + table entries. */ } /* We don't need anything special. */ diff --git a/trunk/arch/ia64/mm/extable.c b/trunk/arch/ia64/mm/extable.c index e95d5ad9285d..71c50dd8f870 100644 --- a/trunk/arch/ia64/mm/extable.c +++ b/trunk/arch/ia64/mm/extable.c @@ -53,32 +53,6 @@ void sort_extable (struct exception_table_entry *start, cmp_ex, swap_ex); } -static inline unsigned long ex_to_addr(const struct exception_table_entry *x) -{ - return (unsigned long)&x->insn + x->insn; -} - -#ifdef CONFIG_MODULES -/* - * Any entry referring to the module init will be at the beginning or - * the end. 
- */ -void trim_init_extable(struct module *m) -{ - /*trim the beginning*/ - while (m->num_exentries && - within_module_init(ex_to_addr(&m->extable[0]), m)) { - m->extable++; - m->num_exentries--; - } - /*trim the end*/ - while (m->num_exentries && - within_module_init(ex_to_addr(&m->extable[m->num_exentries-1]), - m)) - m->num_exentries--; -} -#endif /* CONFIG_MODULES */ - const struct exception_table_entry * search_extable (const struct exception_table_entry *first, const struct exception_table_entry *last, diff --git a/trunk/arch/m32r/kernel/module.c b/trunk/arch/m32r/kernel/module.c index cb5f37d78d49..8d4205794380 100644 --- a/trunk/arch/m32r/kernel/module.c +++ b/trunk/arch/m32r/kernel/module.c @@ -44,6 +44,8 @@ void *module_alloc(unsigned long size) void module_free(struct module *mod, void *module_region) { vfree(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + table entries. */ } /* We don't need anything special. */ diff --git a/trunk/arch/m68k/kernel/module.c b/trunk/arch/m68k/kernel/module.c index cd6bcb1c957e..774862bc6977 100644 --- a/trunk/arch/m68k/kernel/module.c +++ b/trunk/arch/m68k/kernel/module.c @@ -31,6 +31,8 @@ void *module_alloc(unsigned long size) void module_free(struct module *mod, void *module_region) { vfree(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + table entries. */ } /* We don't need anything special. */ diff --git a/trunk/arch/m68knommu/kernel/module.c b/trunk/arch/m68knommu/kernel/module.c index d11ffae7956a..3b1a2ff61ddc 100644 --- a/trunk/arch/m68knommu/kernel/module.c +++ b/trunk/arch/m68knommu/kernel/module.c @@ -23,6 +23,8 @@ void *module_alloc(unsigned long size) void module_free(struct module *mod, void *module_region) { vfree(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + table entries. */ } /* We don't need anything special. */ diff --git a/trunk/arch/mips/kernel/module.c b/trunk/arch/mips/kernel/module.c index 3e9100dcc12d..1f60e27523d9 100644 --- a/trunk/arch/mips/kernel/module.c +++ b/trunk/arch/mips/kernel/module.c @@ -68,6 +68,8 @@ void *module_alloc(unsigned long size) void module_free(struct module *mod, void *module_region) { vfree(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + table entries. */ } int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, diff --git a/trunk/arch/mn10300/kernel/module.c b/trunk/arch/mn10300/kernel/module.c index 4fa0e3648d8e..6b287f2e8e84 100644 --- a/trunk/arch/mn10300/kernel/module.c +++ b/trunk/arch/mn10300/kernel/module.c @@ -48,6 +48,8 @@ void *module_alloc(unsigned long size) void module_free(struct module *mod, void *module_region) { vfree(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + * table entries. */ } /* diff --git a/trunk/arch/parisc/kernel/module.c b/trunk/arch/parisc/kernel/module.c index ef5caf2e6ed0..ecd1c5024447 100644 --- a/trunk/arch/parisc/kernel/module.c +++ b/trunk/arch/parisc/kernel/module.c @@ -267,6 +267,8 @@ void module_free(struct module *mod, void *module_region) mod->arch.section = NULL; vfree(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + table entries. 
*/ } /* Additional bytes needed in front of individual sections */ diff --git a/trunk/arch/powerpc/kernel/module.c b/trunk/arch/powerpc/kernel/module.c index 477c663e0140..43e7e3a7f130 100644 --- a/trunk/arch/powerpc/kernel/module.c +++ b/trunk/arch/powerpc/kernel/module.c @@ -43,6 +43,8 @@ void *module_alloc(unsigned long size) void module_free(struct module *mod, void *module_region) { vfree(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + table entries. */ } static const Elf_Shdr *find_section(const Elf_Ehdr *hdr, diff --git a/trunk/arch/s390/kernel/module.c b/trunk/arch/s390/kernel/module.c index ab2e3ed28abc..eed4a00cb676 100644 --- a/trunk/arch/s390/kernel/module.c +++ b/trunk/arch/s390/kernel/module.c @@ -56,6 +56,8 @@ void *module_alloc(unsigned long size) void module_free(struct module *mod, void *module_region) { vfree(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + table entries. */ } static void diff --git a/trunk/arch/sh/kernel/module.c b/trunk/arch/sh/kernel/module.c index c2efdcde266f..c19b0f7d2cc1 100644 --- a/trunk/arch/sh/kernel/module.c +++ b/trunk/arch/sh/kernel/module.c @@ -46,6 +46,8 @@ void *module_alloc(unsigned long size) void module_free(struct module *mod, void *module_region) { vfree(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + table entries. */ } /* We don't need anything special. */ diff --git a/trunk/arch/sparc/include/asm/uaccess_32.h b/trunk/arch/sparc/include/asm/uaccess_32.h index 8303ac481034..47d5619d43fa 100644 --- a/trunk/arch/sparc/include/asm/uaccess_32.h +++ b/trunk/arch/sparc/include/asm/uaccess_32.h @@ -17,9 +17,6 @@ #ifndef __ASSEMBLY__ -#define ARCH_HAS_SORT_EXTABLE -#define ARCH_HAS_SEARCH_EXTABLE - /* Sparc is not segmented, however we need to be able to fool access_ok() * when doing system calls from kernel mode legitimately. * diff --git a/trunk/arch/sparc/kernel/module.c b/trunk/arch/sparc/kernel/module.c index 0ee642f63234..90273765e81f 100644 --- a/trunk/arch/sparc/kernel/module.c +++ b/trunk/arch/sparc/kernel/module.c @@ -75,6 +75,8 @@ void *module_alloc(unsigned long size) void module_free(struct module *mod, void *module_region) { vfree(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + table entries. */ } /* Make generic code ignore STT_REGISTER dummy undefined symbols. */ diff --git a/trunk/arch/sparc/mm/extable.c b/trunk/arch/sparc/mm/extable.c index a61c349448e1..16cc28935e39 100644 --- a/trunk/arch/sparc/mm/extable.c +++ b/trunk/arch/sparc/mm/extable.c @@ -28,10 +28,6 @@ search_extable(const struct exception_table_entry *start, * word 3: last insn address + 4 bytes * word 4: fixup code address * - * Deleted entries are encoded as: - * word 1: unused - * word 2: -1 - * * See asm/uaccess.h for more details. */ @@ -43,10 +39,6 @@ search_extable(const struct exception_table_entry *start, continue; } - /* A deleted entry; see trim_init_extable */ - if (walk->fixup == -1) - continue; - if (walk->insn == value) return walk; } @@ -65,27 +57,6 @@ search_extable(const struct exception_table_entry *start, return NULL; } -#ifdef CONFIG_MODULES -/* We could memmove them around; easier to mark the trimmed ones. */ -void trim_init_extable(struct module *m) -{ - unsigned int i; - bool range; - - for (i = 0; i < m->num_exentries; i += range ? 
2 : 1) { - range = m->extable[i].fixup == 0; - - if (within_module_init(m->extable[i].insn, m)) { - m->extable[i].fixup = -1; - if (range) - m->extable[i+1].fixup = -1; - } - if (range) - i++; - } -} -#endif /* CONFIG_MODULES */ - /* Special extable search, which handles ranges. Returns fixup */ unsigned long search_extables_range(unsigned long addr, unsigned long *g2) { diff --git a/trunk/arch/um/include/asm/pgtable.h b/trunk/arch/um/include/asm/pgtable.h index 9ce3f165111a..58da2480a7f4 100644 --- a/trunk/arch/um/include/asm/pgtable.h +++ b/trunk/arch/um/include/asm/pgtable.h @@ -53,21 +53,16 @@ extern unsigned long end_iomem; #else # define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE) #endif -#define MODULES_VADDR VMALLOC_START -#define MODULES_END VMALLOC_END -#define MODULES_LEN (MODULES_VADDR - MODULES_END) #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY) #define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY) #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) -#define __PAGE_KERNEL_EXEC \ - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) + #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED) #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED) #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) -#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) /* * The i386 can't do page protection for execute, and considers that the same diff --git a/trunk/arch/um/sys-i386/Makefile b/trunk/arch/um/sys-i386/Makefile index 1b549bca4645..598b5c1903af 100644 --- a/trunk/arch/um/sys-i386/Makefile +++ b/trunk/arch/um/sys-i386/Makefile @@ -8,7 +8,7 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \ subarch-obj-y = lib/semaphore_32.o lib/string_32.o subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o -subarch-obj-$(CONFIG_MODULES) += kernel/module.o +subarch-obj-$(CONFIG_MODULES) += kernel/module_32.o USER_OBJS := bugs.o ptrace_user.o fault.o diff --git a/trunk/arch/um/sys-x86_64/Makefile b/trunk/arch/um/sys-x86_64/Makefile index 2201e9c20e4a..c8b4cce9cfe1 100644 --- a/trunk/arch/um/sys-x86_64/Makefile +++ b/trunk/arch/um/sys-x86_64/Makefile @@ -8,8 +8,10 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \ setjmp.o signal.o stub.o stub_segv.o syscalls.o syscall_table.o \ sysrq.o ksyms.o tls.o +obj-$(CONFIG_MODULES) += um_module.o + subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o -subarch-obj-$(CONFIG_MODULES) += kernel/module.o +subarch-obj-$(CONFIG_MODULES) += kernel/module_64.o ldt-y = ../sys-i386/ldt.o diff --git a/trunk/arch/um/sys-x86_64/um_module.c b/trunk/arch/um/sys-x86_64/um_module.c new file mode 100644 index 000000000000..3dead392a415 --- /dev/null +++ b/trunk/arch/um/sys-x86_64/um_module.c @@ -0,0 +1,21 @@ +#include +#include + +/* Copied from i386 arch/i386/kernel/module.c */ +void *module_alloc(unsigned long size) +{ + if (size == 0) + return NULL; + return vmalloc_exec(size); +} + +/* Free memory returned from module_alloc */ +void module_free(struct module *mod, void *module_region) +{ + vfree(module_region); + /* + * FIXME: If module_region == mod->init_region, trim exception + * table entries. 
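+ * (For reference, what such trimming would look like can be read off the
+ * trim_init_extable() implementations this patch deletes earlier: drop
+ * extable entries whose address is within_module_init(), e.g. for the
+ * head of the table
+ *
+ *	while (m->num_exentries &&
+ *	       within_module_init(ex_to_addr(&m->extable[0]), m)) {
+ *		m->extable++;
+ *		m->num_exentries--;
+ *	}
+ *
+ * with a mirror-image loop for the tail.)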
+ */ +} + diff --git a/trunk/arch/x86/include/asm/lguest.h b/trunk/arch/x86/include/asm/lguest.h index 313389cd50d2..1caf57628b9c 100644 --- a/trunk/arch/x86/include/asm/lguest.h +++ b/trunk/arch/x86/include/asm/lguest.h @@ -17,13 +17,8 @@ /* Pages for switcher itself, then two pages per cpu */ #define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids) -/* We map at -4M (-2M when PAE is activated) for ease of mapping - * into the guest (one PTE page). */ -#ifdef CONFIG_X86_PAE -#define SWITCHER_ADDR 0xFFE00000 -#else +/* We map at -4M for ease of mapping into the guest (one PTE page). */ #define SWITCHER_ADDR 0xFFC00000 -#endif /* Found in switcher.S */ extern unsigned long default_idt_entries[]; diff --git a/trunk/arch/x86/include/asm/lguest_hcall.h b/trunk/arch/x86/include/asm/lguest_hcall.h index d31c4a684078..faae1996487b 100644 --- a/trunk/arch/x86/include/asm/lguest_hcall.h +++ b/trunk/arch/x86/include/asm/lguest_hcall.h @@ -12,13 +12,11 @@ #define LHCALL_TS 8 #define LHCALL_SET_CLOCKEVENT 9 #define LHCALL_HALT 10 -#define LHCALL_SET_PMD 13 #define LHCALL_SET_PTE 14 -#define LHCALL_SET_PGD 15 +#define LHCALL_SET_PMD 15 #define LHCALL_LOAD_TLS 16 #define LHCALL_NOTIFY 17 #define LHCALL_LOAD_GDT_ENTRY 18 -#define LHCALL_SEND_INTERRUPTS 19 #define LGUEST_TRAP_ENTRY 0x1F @@ -34,10 +32,10 @@ * operations? There are two ways: the direct way is to make a "hypercall", * to make requests of the Host Itself. * - * We use the KVM hypercall mechanism. Seventeen hypercalls are + * We use the KVM hypercall mechanism. Eighteen hypercalls are * available: the hypercall number is put in the %eax register, and the - * arguments (when required) are placed in %ebx, %ecx, %edx and %esi. - * If a return value makes sense, it's returned in %eax. + * arguments (when required) are placed in %ebx, %ecx and %edx. If a return + * value makes sense, it's returned in %eax. * * Grossly invalid calls result in Sudden Death at the hands of the vengeful * Host, rather than returning failure. 
This reflects Winston Churchill's @@ -49,9 +47,8 @@ #define LHCALL_RING_SIZE 64 struct hcall_args { - /* These map directly onto eax, ebx, ecx, edx and esi - * in struct lguest_regs */ - unsigned long arg0, arg1, arg2, arg3, arg4; + /* These map directly onto eax, ebx, ecx, edx in struct lguest_regs */ + unsigned long arg0, arg1, arg2, arg3; }; #endif /* !__ASSEMBLY__ */ diff --git a/trunk/arch/x86/include/asm/pgtable_32_types.h b/trunk/arch/x86/include/asm/pgtable_32_types.h index 5e67c1532314..2733fad45f98 100644 --- a/trunk/arch/x86/include/asm/pgtable_32_types.h +++ b/trunk/arch/x86/include/asm/pgtable_32_types.h @@ -46,10 +46,6 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */ # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) #endif -#define MODULES_VADDR VMALLOC_START -#define MODULES_END VMALLOC_END -#define MODULES_LEN (MODULES_VADDR - MODULES_END) - #define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE) #endif /* _ASM_X86_PGTABLE_32_DEFS_H */ diff --git a/trunk/arch/x86/kernel/Makefile b/trunk/arch/x86/kernel/Makefile index f3477bb84566..4f78bd682125 100644 --- a/trunk/arch/x86/kernel/Makefile +++ b/trunk/arch/x86/kernel/Makefile @@ -73,7 +73,7 @@ obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o obj-$(CONFIG_KPROBES) += kprobes.o -obj-$(CONFIG_MODULES) += module.o +obj-$(CONFIG_MODULES) += module_$(BITS).o obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o obj-$(CONFIG_KGDB) += kgdb.o diff --git a/trunk/arch/x86/kernel/asm-offsets_32.c b/trunk/arch/x86/kernel/asm-offsets_32.c index dfdbf6403895..1a830cbd7015 100644 --- a/trunk/arch/x86/kernel/asm-offsets_32.c +++ b/trunk/arch/x86/kernel/asm-offsets_32.c @@ -126,7 +126,6 @@ void foo(void) #if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE) BLANK(); OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled); - OFFSET(LGUEST_DATA_irq_pending, lguest_data, irq_pending); OFFSET(LGUEST_DATA_pgdir, lguest_data, pgdir); BLANK(); diff --git a/trunk/arch/x86/kernel/module_32.c b/trunk/arch/x86/kernel/module_32.c new file mode 100644 index 000000000000..0edd819050e7 --- /dev/null +++ b/trunk/arch/x86/kernel/module_32.c @@ -0,0 +1,152 @@ +/* Kernel module help for i386. + Copyright (C) 2001 Rusty Russell. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ +#include +#include +#include +#include +#include +#include +#include + +#if 0 +#define DEBUGP printk +#else +#define DEBUGP(fmt...) 
+#endif + +void *module_alloc(unsigned long size) +{ + if (size == 0) + return NULL; + return vmalloc_exec(size); +} + + +/* Free memory returned from module_alloc */ +void module_free(struct module *mod, void *module_region) +{ + vfree(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + table entries. */ +} + +/* We don't need anything special. */ +int module_frob_arch_sections(Elf_Ehdr *hdr, + Elf_Shdr *sechdrs, + char *secstrings, + struct module *mod) +{ + return 0; +} + +int apply_relocate(Elf32_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *me) +{ + unsigned int i; + Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr; + Elf32_Sym *sym; + uint32_t *location; + + DEBUGP("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* This is where to make the change */ + location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset; + /* This is the symbol it is referring to. Note that all + undefined symbols have been resolved. */ + sym = (Elf32_Sym *)sechdrs[symindex].sh_addr + + ELF32_R_SYM(rel[i].r_info); + + switch (ELF32_R_TYPE(rel[i].r_info)) { + case R_386_32: + /* We add the value into the location given */ + *location += sym->st_value; + break; + case R_386_PC32: + /* Add the value, subtract its position */ + *location += sym->st_value - (uint32_t)location; + break; + default: + printk(KERN_ERR "module %s: Unknown relocation: %u\n", + me->name, ELF32_R_TYPE(rel[i].r_info)); + return -ENOEXEC; + } + } + return 0; +} + +int apply_relocate_add(Elf32_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *me) +{ + printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n", + me->name); + return -ENOEXEC; +} + +int module_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *me) +{ + const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL, + *para = NULL; + char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { + if (!strcmp(".text", secstrings + s->sh_name)) + text = s; + if (!strcmp(".altinstructions", secstrings + s->sh_name)) + alt = s; + if (!strcmp(".smp_locks", secstrings + s->sh_name)) + locks = s; + if (!strcmp(".parainstructions", secstrings + s->sh_name)) + para = s; + } + + if (alt) { + /* patch .altinstructions */ + void *aseg = (void *)alt->sh_addr; + apply_alternatives(aseg, aseg + alt->sh_size); + } + if (locks && text) { + void *lseg = (void *)locks->sh_addr; + void *tseg = (void *)text->sh_addr; + alternatives_smp_module_add(me, me->name, + lseg, lseg + locks->sh_size, + tseg, tseg + text->sh_size); + } + + if (para) { + void *pseg = (void *)para->sh_addr; + apply_paravirt(pseg, pseg + para->sh_size); + } + + return module_bug_finalize(hdr, sechdrs, me); +} + +void module_arch_cleanup(struct module *mod) +{ + alternatives_smp_module_del(mod); + module_bug_cleanup(mod); +} diff --git a/trunk/arch/x86/kernel/module.c b/trunk/arch/x86/kernel/module_64.c similarity index 74% rename from trunk/arch/x86/kernel/module.c rename to trunk/arch/x86/kernel/module_64.c index 89f386f044e4..c23880b90b5c 100644 --- a/trunk/arch/x86/kernel/module.c +++ b/trunk/arch/x86/kernel/module_64.c @@ -1,5 +1,6 @@ -/* Kernel module help for x86. +/* Kernel module help for x86-64 Copyright (C) 2001 Rusty Russell. + Copyright (C) 2002,2003 Andi Kleen, SuSE Labs.
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -21,18 +22,23 @@ #include #include #include -#include #include +#include +#include #include #include #include -#if 0 -#define DEBUGP printk -#else #define DEBUGP(fmt...) -#endif + +#ifndef CONFIG_UML +void module_free(struct module *mod, void *module_region) +{ + vfree(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + table entries. */ +} void *module_alloc(unsigned long size) { @@ -48,15 +54,9 @@ void *module_alloc(unsigned long size) if (!area) return NULL; - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM, - PAGE_KERNEL_EXEC); -} - -/* Free memory returned from module_alloc */ -void module_free(struct module *mod, void *module_region) -{ - vfree(module_region); + return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC); } +#endif /* We don't need anything special. */ int module_frob_arch_sections(Elf_Ehdr *hdr, @@ -67,58 +67,6 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, return 0; } -#ifdef CONFIG_X86_32 -int apply_relocate(Elf32_Shdr *sechdrs, - const char *strtab, - unsigned int symindex, - unsigned int relsec, - struct module *me) -{ - unsigned int i; - Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr; - Elf32_Sym *sym; - uint32_t *location; - - DEBUGP("Applying relocate section %u to %u\n", relsec, - sechdrs[relsec].sh_info); - for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { - /* This is where to make the change */ - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr - + rel[i].r_offset; - /* This is the symbol it is referring to. Note that all - undefined symbols have been resolved. */ - sym = (Elf32_Sym *)sechdrs[symindex].sh_addr - + ELF32_R_SYM(rel[i].r_info); - - switch (ELF32_R_TYPE(rel[i].r_info)) { - case R_386_32: - /* We add the value into the location given */ - *location += sym->st_value; - break; - case R_386_PC32: - /* Add the value, subtract its postition */ - *location += sym->st_value - (uint32_t)location; - break; - default: - printk(KERN_ERR "module %s: Unknown relocation: %u\n", - me->name, ELF32_R_TYPE(rel[i].r_info)); - return -ENOEXEC; - } - } - return 0; -} - -int apply_relocate_add(Elf32_Shdr *sechdrs, - const char *strtab, - unsigned int symindex, - unsigned int relsec, - struct module *me) -{ - printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n", - me->name); - return -ENOEXEC; -} -#else /*X86_64*/ int apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex, @@ -199,8 +147,6 @@ int apply_relocate(Elf_Shdr *sechdrs, return -ENOSYS; } -#endif - int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) diff --git a/trunk/arch/x86/kernel/setup.c b/trunk/arch/x86/kernel/setup.c index be5ae80f897f..d1c636bf31a7 100644 --- a/trunk/arch/x86/kernel/setup.c +++ b/trunk/arch/x86/kernel/setup.c @@ -301,13 +301,15 @@ static void __init reserve_brk(void) #ifdef CONFIG_BLK_DEV_INITRD +#ifdef CONFIG_X86_32 + #define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) static void __init relocate_initrd(void) { u64 ramdisk_image = boot_params.hdr.ramdisk_image; u64 ramdisk_size = boot_params.hdr.ramdisk_size; - u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT; + u64 end_of_lowmem = max_low_pfn << PAGE_SHIFT; u64 ramdisk_here; unsigned long slop, clen, mapaddr; char *p, *q; @@ -363,13 +365,14 @@ static void __init relocate_initrd(void) ramdisk_image, ramdisk_image + ramdisk_size - 1, ramdisk_here, ramdisk_here + 
ramdisk_size - 1); } +#endif static void __init reserve_initrd(void) { u64 ramdisk_image = boot_params.hdr.ramdisk_image; u64 ramdisk_size = boot_params.hdr.ramdisk_size; u64 ramdisk_end = ramdisk_image + ramdisk_size; - u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT; + u64 end_of_lowmem = max_low_pfn << PAGE_SHIFT; if (!boot_params.hdr.type_of_loader || !ramdisk_image || !ramdisk_size) @@ -399,8 +402,14 @@ static void __init reserve_initrd(void) return; } +#ifdef CONFIG_X86_32 relocate_initrd(); - +#else + printk(KERN_ERR "initrd extends beyond end of memory " + "(0x%08llx > 0x%08llx)\ndisabling initrd\n", + ramdisk_end, end_of_lowmem); + initrd_start = 0; +#endif free_early(ramdisk_image, ramdisk_end); } #else diff --git a/trunk/arch/x86/kernel/vmlinux.lds.S b/trunk/arch/x86/kernel/vmlinux.lds.S index 367e87882041..4c85b2e2bb65 100644 --- a/trunk/arch/x86/kernel/vmlinux.lds.S +++ b/trunk/arch/x86/kernel/vmlinux.lds.S @@ -108,8 +108,6 @@ SECTIONS /* Data */ . = ALIGN(PAGE_SIZE); .data : AT(ADDR(.data) - LOAD_OFFSET) { - /* Start of data section */ - _sdata = .; DATA_DATA CONSTRUCTORS diff --git a/trunk/arch/x86/lguest/Kconfig b/trunk/arch/x86/lguest/Kconfig index 38718041efc3..8dab8f7844d3 100644 --- a/trunk/arch/x86/lguest/Kconfig +++ b/trunk/arch/x86/lguest/Kconfig @@ -2,6 +2,7 @@ config LGUEST_GUEST bool "Lguest guest support" select PARAVIRT depends on X86_32 + depends on !X86_PAE select VIRTIO select VIRTIO_RING select VIRTIO_CONSOLE diff --git a/trunk/arch/x86/lguest/boot.c b/trunk/arch/x86/lguest/boot.c index 7bc65f0f62c4..4e0c26559395 100644 --- a/trunk/arch/x86/lguest/boot.c +++ b/trunk/arch/x86/lguest/boot.c @@ -87,7 +87,7 @@ struct lguest_data lguest_data = { /*G:037 async_hcall() is pretty simple: I'm quite proud of it really. We have a * ring buffer of stored hypercalls which the Host will run through next time we - * do a normal hypercall. Each entry in the ring has 5 slots for the hypercall + * do a normal hypercall. Each entry in the ring has 4 slots for the hypercall * arguments, and a "hcall_status" word which is 0 if the call is ready to go, * and 255 once the Host has finished with it. * @@ -96,8 +96,7 @@ struct lguest_data lguest_data = { * effect of causing the Host to run all the stored calls in the ring buffer * which empties it for next time! */ static void async_hcall(unsigned long call, unsigned long arg1, - unsigned long arg2, unsigned long arg3, - unsigned long arg4) + unsigned long arg2, unsigned long arg3) { /* Note: This code assumes we're uniprocessor. */ static unsigned int next_call; @@ -109,13 +108,12 @@ static void async_hcall(unsigned long call, unsigned long arg1, local_irq_save(flags); if (lguest_data.hcall_status[next_call] != 0xFF) { /* Table full, so do normal hcall which will flush table.
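 * (A hedged sketch, assembled from facts stated elsewhere in this series
 * rather than taken from kvm_hypercall3() itself: the "normal hcall" is
 * the KVM vmcall instruction, call number in %eax, arguments in %ebx,
 * %ecx and %edx, roughly
 *
 *	asm volatile(".byte 0x0f,0x01,0xc1"	// KVM_HYPERCALL, ie. vmcall
 *		     : "=a"(call)
 *		     : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3)
 *		     : "memory");
 *
 * and a side effect of any such call is that the Host drains this ring.)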
*/ - kvm_hypercall4(call, arg1, arg2, arg3, arg4); + kvm_hypercall3(call, arg1, arg2, arg3); } else { lguest_data.hcalls[next_call].arg0 = call; lguest_data.hcalls[next_call].arg1 = arg1; lguest_data.hcalls[next_call].arg2 = arg2; lguest_data.hcalls[next_call].arg3 = arg3; - lguest_data.hcalls[next_call].arg4 = arg4; /* Arguments must all be written before we mark it to go */ wmb(); lguest_data.hcall_status[next_call] = 0; @@ -143,7 +141,7 @@ static void lazy_hcall1(unsigned long call, if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) kvm_hypercall1(call, arg1); else - async_hcall(call, arg1, 0, 0, 0); + async_hcall(call, arg1, 0, 0); } static void lazy_hcall2(unsigned long call, @@ -153,7 +151,7 @@ static void lazy_hcall2(unsigned long call, if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) kvm_hypercall2(call, arg1, arg2); else - async_hcall(call, arg1, arg2, 0, 0); + async_hcall(call, arg1, arg2, 0); } static void lazy_hcall3(unsigned long call, @@ -164,23 +162,9 @@ static void lazy_hcall3(unsigned long call, if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) kvm_hypercall3(call, arg1, arg2, arg3); else - async_hcall(call, arg1, arg2, arg3, 0); + async_hcall(call, arg1, arg2, arg3); } -#ifdef CONFIG_X86_PAE -static void lazy_hcall4(unsigned long call, - unsigned long arg1, - unsigned long arg2, - unsigned long arg3, - unsigned long arg4) -{ - if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) - kvm_hypercall4(call, arg1, arg2, arg3, arg4); - else - async_hcall(call, arg1, arg2, arg3, arg4); -} -#endif - /* When lazy mode is turned off reset the per-cpu lazy mode variable and then * issue the do-nothing hypercall to flush any stored calls. */ static void lguest_leave_lazy_mmu_mode(void) @@ -195,7 +179,7 @@ static void lguest_end_context_switch(struct task_struct *next) paravirt_end_context_switch(next); } -/*G:032 +/*G:033 * After that diversion we return to our first native-instruction * replacements: four functions for interrupt control. * @@ -215,28 +199,30 @@ static unsigned long save_fl(void) { return lguest_data.irq_enabled; } +PV_CALLEE_SAVE_REGS_THUNK(save_fl); + +/* restore_flags() just sets the flags back to the value given. */ +static void restore_fl(unsigned long flags) +{ + lguest_data.irq_enabled = flags; +} +PV_CALLEE_SAVE_REGS_THUNK(restore_fl); /* Interrupts go off... */ static void irq_disable(void) { lguest_data.irq_enabled = 0; } - -/* Let's pause a moment. Remember how I said these are called so often? - * Jeremy Fitzhardinge optimized them so hard early in 2009 that he had to - * break some rules. In particular, these functions are assumed to save their - * own registers if they need to: normal C functions assume they can trash the - * eax register. To use normal C functions, we use - * PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the - * C function, then restores it. */ -PV_CALLEE_SAVE_REGS_THUNK(save_fl); PV_CALLEE_SAVE_REGS_THUNK(irq_disable); -/*:*/ -/* These are in i386_head.S */ -extern void lg_irq_enable(void); -extern void lg_restore_fl(unsigned long flags); +/* Interrupts go on... */ +static void irq_enable(void) +{ + lguest_data.irq_enabled = X86_EFLAGS_IF; +} +PV_CALLEE_SAVE_REGS_THUNK(irq_enable); +/*:*/ /*M:003 Note that we don't check for outstanding interrupts when we re-enable * them (or when we unmask an interrupt). 
This seems to work for the moment, * since interrupts are rare and we'll just get the interrupt on the next timer @@ -382,8 +368,8 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx, case 1: /* Basic feature request. */ /* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */ *cx &= 0x00002201; - /* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU, PAE. */ - *dx &= 0x07808151; + /* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU. */ + *dx &= 0x07808111; /* The Host can do a nice optimization if it knows that the * kernel mappings (addresses above 0xC0000000 or whatever * PAGE_OFFSET is set to) haven't changed. But Linux calls @@ -402,11 +388,6 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx, if (*ax > 0x80000008) *ax = 0x80000008; break; - case 0x80000001: - /* Here we should fix nx cap depending on host. */ - /* For this version of PAE, we just clear NX bit. */ - *dx &= ~(1 << 20); - break; } } @@ -540,52 +521,25 @@ static void lguest_write_cr4(unsigned long val) static void lguest_pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { -#ifdef CONFIG_X86_PAE - lazy_hcall4(LHCALL_SET_PTE, __pa(mm->pgd), addr, - ptep->pte_low, ptep->pte_high); -#else lazy_hcall3(LHCALL_SET_PTE, __pa(mm->pgd), addr, ptep->pte_low); -#endif } static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) { - native_set_pte(ptep, pteval); + *ptep = pteval; lguest_pte_update(mm, addr, ptep); } -/* The Guest calls lguest_set_pud to set a top-level entry and lguest_set_pmd - * to set a middle-level entry when PAE is activated. - * Again, we set the entry then tell the Host which page we changed, - * and the index of the entry we changed. */ -#ifdef CONFIG_X86_PAE -static void lguest_set_pud(pud_t *pudp, pud_t pudval) -{ - native_set_pud(pudp, pudval); - - /* 32 bytes aligned pdpt address and the index. */ - lazy_hcall2(LHCALL_SET_PGD, __pa(pudp) & 0xFFFFFFE0, - (__pa(pudp) & 0x1F) / sizeof(pud_t)); -} - +/* The Guest calls this to set a top-level entry. Again, we set the entry then + * tell the Host which top-level page we changed, and the index of the entry we + * changed. */ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) { - native_set_pmd(pmdp, pmdval); + *pmdp = pmdval; lazy_hcall2(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK, - (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t)); + (__pa(pmdp) & (PAGE_SIZE - 1)) / 4); } -#else - -/* The Guest calls lguest_set_pmd to set a top-level entry when PAE is not - * activated. */ -static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) -{ - native_set_pmd(pmdp, pmdval); - lazy_hcall2(LHCALL_SET_PGD, __pa(pmdp) & PAGE_MASK, - (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t)); -} -#endif /* There are a couple of legacy places where the kernel sets a PTE, but we * don't know the top level any more. This is useless for us, since we don't @@ -598,31 +552,11 @@ static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval) * which brings boot back to 0.25 seconds. 
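 * (So the rule the code below implements is: stay silent until the Guest
 * loads its first real cr3, and after that pay for every "blind" set_pte
 * with one LHCALL_FLUSH_TLB of the whole Guest TLB; that is all the
 * cr3_changed test guards.)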
*/ static void lguest_set_pte(pte_t *ptep, pte_t pteval) { - native_set_pte(ptep, pteval); - if (cr3_changed) - lazy_hcall1(LHCALL_FLUSH_TLB, 1); -} - -#ifdef CONFIG_X86_PAE -static void lguest_set_pte_atomic(pte_t *ptep, pte_t pte) -{ - native_set_pte_atomic(ptep, pte); + *ptep = pteval; if (cr3_changed) lazy_hcall1(LHCALL_FLUSH_TLB, 1); } -void lguest_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) -{ - native_pte_clear(mm, addr, ptep); - lguest_pte_update(mm, addr, ptep); -} - -void lguest_pmd_clear(pmd_t *pmdp) -{ - lguest_set_pmd(pmdp, __pmd(0)); -} -#endif - /* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on * native page table operations. On native hardware you can set a new page * table entry whenever you want, but if you want to remove one you have to do @@ -694,12 +628,13 @@ static void __init lguest_init_IRQ(void) { unsigned int i; - for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) { + for (i = 0; i < LGUEST_IRQS; i++) { + int vector = FIRST_EXTERNAL_VECTOR + i; /* Some systems map "vectors" to interrupts weirdly. Lguest has * a straightforward 1 to 1 mapping, so force that here. */ - __get_cpu_var(vector_irq)[i] = i - FIRST_EXTERNAL_VECTOR; - if (i != SYSCALL_VECTOR) - set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]); + __get_cpu_var(vector_irq)[vector] = i; + if (vector != SYSCALL_VECTOR) + set_intr_gate(vector, interrupt[i]); } /* This call is required to set up for 4k stacks, where we have * separate stacks for hard and soft interrupts. */ @@ -1038,10 +973,10 @@ static void lguest_restart(char *reason) * * Our current solution is to allow the paravirt back end to optionally patch * over the indirect calls to replace them with something more efficient. We - * patch two of the simplest of the most commonly called functions: disable - * interrupts and save interrupts. We usually have 6 or 10 bytes to patch - * into: the Guest versions of these operations are small enough that we can - * fit comfortably. + * patch the four most commonly called functions: disable interrupts, enable + * interrupts, restore interrupts and save interrupts. We usually have 6 or 10 + * bytes to patch into: the Guest versions of these operations are small enough + * that we can fit comfortably. * * First we need assembly templates of each of the patchable Guest operations, * and these are in i386_head.S. */ @@ -1052,6 +987,8 @@ static const struct lguest_insns const char *start, *end; } lguest_insns[] = { [PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli }, + [PARAVIRT_PATCH(pv_irq_ops.irq_enable)] = { lgstart_sti, lgend_sti }, + [PARAVIRT_PATCH(pv_irq_ops.restore_fl)] = { lgstart_popf, lgend_popf }, [PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf }, }; @@ -1089,7 +1026,6 @@ __init void lguest_init(void) pv_info.name = "lguest"; pv_info.paravirt_enabled = 1; pv_info.kernel_rpl = 1; - pv_info.shared_kernel_pmd = 1; /* We set up all the lguest overrides for sensitive operations. These * are detailed with the operations themselves. 
*/ @@ -1097,9 +1033,9 @@ __init void lguest_init(void) /* interrupt-related operations */ pv_irq_ops.init_IRQ = lguest_init_IRQ; pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl); - pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl); + pv_irq_ops.restore_fl = PV_CALLEE_SAVE(restore_fl); pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable); - pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable); + pv_irq_ops.irq_enable = PV_CALLEE_SAVE(irq_enable); pv_irq_ops.safe_halt = lguest_safe_halt; /* init-time operations */ @@ -1135,12 +1071,6 @@ __init void lguest_init(void) pv_mmu_ops.set_pte = lguest_set_pte; pv_mmu_ops.set_pte_at = lguest_set_pte_at; pv_mmu_ops.set_pmd = lguest_set_pmd; -#ifdef CONFIG_X86_PAE - pv_mmu_ops.set_pte_atomic = lguest_set_pte_atomic; - pv_mmu_ops.pte_clear = lguest_pte_clear; - pv_mmu_ops.pmd_clear = lguest_pmd_clear; - pv_mmu_ops.set_pud = lguest_set_pud; -#endif pv_mmu_ops.read_cr2 = lguest_read_cr2; pv_mmu_ops.read_cr3 = lguest_read_cr3; pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu; diff --git a/trunk/arch/x86/lguest/i386_head.S b/trunk/arch/x86/lguest/i386_head.S index a9c8cfe61cd4..f79541989471 100644 --- a/trunk/arch/x86/lguest/i386_head.S +++ b/trunk/arch/x86/lguest/i386_head.S @@ -46,64 +46,10 @@ ENTRY(lguest_entry) .globl lgstart_##name; .globl lgend_##name LGUEST_PATCH(cli, movl $0, lguest_data+LGUEST_DATA_irq_enabled) +LGUEST_PATCH(sti, movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled) +LGUEST_PATCH(popf, movl %eax, lguest_data+LGUEST_DATA_irq_enabled) LGUEST_PATCH(pushf, movl lguest_data+LGUEST_DATA_irq_enabled, %eax) - -/*G:033 But using those wrappers is inefficient (we'll see why that doesn't - * matter for save_fl and irq_disable later). If we write our routines - * carefully in assembler, we can avoid clobbering any registers and avoid - * jumping through the wrapper functions. - * - * I skipped over our first piece of assembler, but this one is worth studying - * in a bit more detail so I'll describe in easy stages. First, the routine - * to enable interrupts: */ -ENTRY(lg_irq_enable) - /* The reverse of irq_disable, this sets lguest_data.irq_enabled to - * X86_EFLAGS_IF (ie. "Interrupts enabled"). */ - movl $X86_EFLAGS_IF, lguest_data+LGUEST_DATA_irq_enabled - /* But now we need to check if the Host wants to know: there might have - * been interrupts waiting to be delivered, in which case it will have - * set lguest_data.irq_pending to X86_EFLAGS_IF. If it's not zero, we - * jump to send_interrupts, otherwise we're done. */ - testl $0, lguest_data+LGUEST_DATA_irq_pending - jnz send_interrupts - /* One cool thing about x86 is that you can do many things without using - * a register. In this case, the normal path hasn't needed to save or - * restore any registers at all! */ - ret -send_interrupts: - /* OK, now we need a register: eax is used for the hypercall number, - * which is LHCALL_SEND_INTERRUPTS. - * - * We used not to bother with this pending detection at all, which was - * much simpler. Sooner or later the Host would realize it had to - * send us an interrupt. But that turns out to make performance 7 - * times worse on a simple tcp benchmark. So now we do this the hard - * way. */ - pushl %eax - movl $LHCALL_SEND_INTERRUPTS, %eax - /* This is a vmcall instruction (same thing that KVM uses). Older - * assembler versions might not know the "vmcall" instruction, so we - * create one manually here. */ - .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */ - popl %eax - ret - -/* Finally, the "popf" or "restore flags" routine. 
The %eax register holds the - * flags (in practice, either X86_EFLAGS_IF or 0): if it's X86_EFLAGS_IF we're - * enabling interrupts again, if it's 0 we're leaving them off. */ -ENTRY(lg_restore_fl) - /* This is just "lguest_data.irq_enabled = flags;" */ - movl %eax, lguest_data+LGUEST_DATA_irq_enabled - /* Now, if the %eax value has enabled interrupts and - * lguest_data.irq_pending is set, we want to tell the Host so it can - * deliver any outstanding interrupts. Fortunately, both values will - * be X86_EFLAGS_IF (ie. 512) in that case, and the "testl" - * instruction will AND them together for us. If both are set, we - * jump to send_interrupts. */ - testl lguest_data+LGUEST_DATA_irq_pending, %eax - jnz send_interrupts - /* Again, the normal path has used no extra registers. Clever, huh? */ - ret +/*:*/ /* These demark the EIP range where host should never deliver interrupts. */ .global lguest_noirq_start diff --git a/trunk/arch/xtensa/kernel/module.c b/trunk/arch/xtensa/kernel/module.c index c1accea8cb56..3981a466c779 100644 --- a/trunk/arch/xtensa/kernel/module.c +++ b/trunk/arch/xtensa/kernel/module.c @@ -34,6 +34,8 @@ void *module_alloc(unsigned long size) void module_free(struct module *mod, void *module_region) { vfree(module_region); + /* FIXME: If module_region == mod->init_region, trim exception + table entries. */ } int module_frob_arch_sections(Elf32_Ehdr *hdr, diff --git a/trunk/drivers/base/firmware_class.c b/trunk/drivers/base/firmware_class.c index 8a267c427629..d3a59c688fe4 100644 --- a/trunk/drivers/base/firmware_class.c +++ b/trunk/drivers/base/firmware_class.c @@ -17,7 +17,7 @@ #include #include #include -#include + #include #include "base.h" @@ -45,10 +45,7 @@ struct firmware_priv { struct bin_attribute attr_data; struct firmware *fw; unsigned long status; - struct page **pages; - int nr_pages; - int page_array_size; - const char *vdata; + int alloc_size; struct timer_list timeout; }; @@ -125,10 +122,6 @@ static ssize_t firmware_loading_show(struct device *dev, return sprintf(buf, "%d\n", loading); } -/* Some architectures don't have PAGE_KERNEL_RO */ -#ifndef PAGE_KERNEL_RO -#define PAGE_KERNEL_RO PAGE_KERNEL -#endif /** * firmware_loading_store - set value in the 'loading' control file * @dev: device pointer @@ -148,7 +141,6 @@ static ssize_t firmware_loading_store(struct device *dev, { struct firmware_priv *fw_priv = dev_get_drvdata(dev); int loading = simple_strtol(buf, NULL, 10); - int i; switch (loading) { case 1: @@ -159,30 +151,13 @@ static ssize_t firmware_loading_store(struct device *dev, } vfree(fw_priv->fw->data); fw_priv->fw->data = NULL; - for (i = 0; i < fw_priv->nr_pages; i++) - __free_page(fw_priv->pages[i]); - kfree(fw_priv->pages); - fw_priv->pages = NULL; - fw_priv->page_array_size = 0; - fw_priv->nr_pages = 0; fw_priv->fw->size = 0; + fw_priv->alloc_size = 0; set_bit(FW_STATUS_LOADING, &fw_priv->status); mutex_unlock(&fw_lock); break; case 0: if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) { - vfree(fw_priv->fw->data); - fw_priv->fw->data = vmap(fw_priv->pages, - fw_priv->nr_pages, - 0, PAGE_KERNEL_RO); - if (!fw_priv->fw->data) { - dev_err(dev, "%s: vmap() failed\n", __func__); - goto err; - } - /* Pages will be freed by vfree() */ - fw_priv->pages = NULL; - fw_priv->page_array_size = 0; - fw_priv->nr_pages = 0; complete(&fw_priv->completion); clear_bit(FW_STATUS_LOADING, &fw_priv->status); break; @@ -192,7 +167,6 @@ static ssize_t firmware_loading_store(struct device *dev, dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading); /* 
fallthrough */ case -1: - err: fw_load_abort(fw_priv); break; } @@ -217,28 +191,8 @@ firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr, ret_count = -ENODEV; goto out; } - if (offset > fw->size) - return 0; - if (count > fw->size - offset) - count = fw->size - offset; - - ret_count = count; - - while (count) { - void *page_data; - int page_nr = offset >> PAGE_SHIFT; - int page_ofs = offset & (PAGE_SIZE-1); - int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); - - page_data = kmap(fw_priv->pages[page_nr]); - - memcpy(buffer, page_data + page_ofs, page_cnt); - - kunmap(fw_priv->pages[page_nr]); - buffer += page_cnt; - offset += page_cnt; - count -= page_cnt; - } + ret_count = memory_read_from_buffer(buffer, count, &offset, + fw->data, fw->size); out: mutex_unlock(&fw_lock); return ret_count; @@ -247,39 +201,27 @@ firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr, static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) { - int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT; - - /* If the array of pages is too small, grow it... */ - if (fw_priv->page_array_size < pages_needed) { - int new_array_size = max(pages_needed, - fw_priv->page_array_size * 2); - struct page **new_pages; - - new_pages = kmalloc(new_array_size * sizeof(void *), - GFP_KERNEL); - if (!new_pages) { - fw_load_abort(fw_priv); - return -ENOMEM; - } - memcpy(new_pages, fw_priv->pages, - fw_priv->page_array_size * sizeof(void *)); - memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) * - (new_array_size - fw_priv->page_array_size)); - kfree(fw_priv->pages); - fw_priv->pages = new_pages; - fw_priv->page_array_size = new_array_size; - } + u8 *new_data; + int new_size = fw_priv->alloc_size; - while (fw_priv->nr_pages < pages_needed) { - fw_priv->pages[fw_priv->nr_pages] = - alloc_page(GFP_KERNEL | __GFP_HIGHMEM); + if (min_size <= fw_priv->alloc_size) + return 0; - if (!fw_priv->pages[fw_priv->nr_pages]) { - fw_load_abort(fw_priv); - return -ENOMEM; - } - fw_priv->nr_pages++; + new_size = ALIGN(min_size, PAGE_SIZE); + new_data = vmalloc(new_size); + if (!new_data) { + printk(KERN_ERR "%s: unable to alloc buffer\n", __func__); + /* Make sure that we don't keep incomplete data */ + fw_load_abort(fw_priv); + return -ENOMEM; } + fw_priv->alloc_size = new_size; + if (fw_priv->fw->data) { + memcpy(new_data, fw_priv->fw->data, fw_priv->fw->size); + vfree(fw_priv->fw->data); + } + fw_priv->fw->data = new_data; + BUG_ON(min_size > fw_priv->alloc_size); return 0; } @@ -316,25 +258,10 @@ firmware_data_write(struct kobject *kobj, struct bin_attribute *bin_attr, if (retval) goto out; - retval = count; - - while (count) { - void *page_data; - int page_nr = offset >> PAGE_SHIFT; - int page_ofs = offset & (PAGE_SIZE - 1); - int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count); - - page_data = kmap(fw_priv->pages[page_nr]); - - memcpy(page_data + page_ofs, buffer, page_cnt); + memcpy((u8 *)fw->data + offset, buffer, count); - kunmap(fw_priv->pages[page_nr]); - buffer += page_cnt; - offset += page_cnt; - count -= page_cnt; - } - - fw->size = max_t(size_t, offset, fw->size); + fw->size = max_t(size_t, offset + count, fw->size); + retval = count; out: mutex_unlock(&fw_lock); return retval; @@ -350,11 +277,7 @@ static struct bin_attribute firmware_attr_data_tmpl = { static void fw_dev_release(struct device *dev) { struct firmware_priv *fw_priv = dev_get_drvdata(dev); - int i; - for (i = 0; i < fw_priv->nr_pages; i++) - __free_page(fw_priv->pages[i]); - 
kfree(fw_priv->pages); kfree(fw_priv); kfree(dev); diff --git a/trunk/drivers/block/virtio_blk.c b/trunk/drivers/block/virtio_blk.c index 43db3ea15b54..c0facaa55cf4 100644 --- a/trunk/drivers/block/virtio_blk.c +++ b/trunk/drivers/block/virtio_blk.c @@ -254,7 +254,7 @@ static int index_to_minor(int index) return index << PART_BITS; } -static int __devinit virtblk_probe(struct virtio_device *vdev) +static int virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; int err; @@ -288,7 +288,7 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) sg_init_table(vblk->sg, vblk->sg_elems); /* We expect one virtqueue, for output. */ - vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests"); + vblk->vq = vdev->config->find_vq(vdev, 0, blk_done); if (IS_ERR(vblk->vq)) { err = PTR_ERR(vblk->vq); goto out_free_vblk; @@ -388,14 +388,14 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) out_mempool: mempool_destroy(vblk->pool); out_free_vq: - vdev->config->del_vqs(vdev); + vdev->config->del_vq(vblk->vq); out_free_vblk: kfree(vblk); out: return err; } -static void __devexit virtblk_remove(struct virtio_device *vdev) +static void virtblk_remove(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; @@ -409,7 +409,7 @@ static void __devexit virtblk_remove(struct virtio_device *vdev) blk_cleanup_queue(vblk->disk->queue); put_disk(vblk->disk); mempool_destroy(vblk->pool); - vdev->config->del_vqs(vdev); + vdev->config->del_vq(vblk->vq); kfree(vblk); } diff --git a/trunk/drivers/char/hw_random/virtio-rng.c b/trunk/drivers/char/hw_random/virtio-rng.c index 32216b623248..86e83f883139 100644 --- a/trunk/drivers/char/hw_random/virtio-rng.c +++ b/trunk/drivers/char/hw_random/virtio-rng.c @@ -35,13 +35,13 @@ static DECLARE_COMPLETION(have_data); static void random_recv_done(struct virtqueue *vq) { - unsigned int len; + int len; /* We can get spurious callbacks, e.g. shared IRQs + virtio_pci. */ if (!vq->vq_ops->get_buf(vq, &len)) return; - data_left += len; + data_left = len / sizeof(random_data[0]); complete(&have_data); } @@ -49,7 +49,7 @@ static void register_buffer(void) { struct scatterlist sg; - sg_init_one(&sg, random_data+data_left, RANDOM_DATA_SIZE-data_left); + sg_init_one(&sg, random_data, RANDOM_DATA_SIZE); /* There should always be room for one buffer. */ if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) != 0) BUG(); @@ -59,32 +59,24 @@ static void register_buffer(void) /* At least we don't udelay() in a loop like some other drivers. */ static int virtio_data_present(struct hwrng *rng, int wait) { - if (data_left >= sizeof(u32)) + if (data_left) return 1; -again: if (!wait) return 0; wait_for_completion(&have_data); - - /* Not enough? Re-register. */ - if (unlikely(data_left < sizeof(u32))) { - register_buffer(); - goto again; - } - return 1; } /* virtio_data_present() must have succeeded before this is called. */ static int virtio_data_read(struct hwrng *rng, u32 *data) { - BUG_ON(data_left < sizeof(u32)); - data_left -= sizeof(u32); - *data = random_data[data_left / 4]; + BUG_ON(!data_left); + + *data = random_data[--data_left]; - if (data_left < sizeof(u32)) { + if (!data_left) { init_completion(&have_data); register_buffer(); } @@ -102,13 +94,13 @@ static int virtrng_probe(struct virtio_device *vdev) int err; /* We expect a single virtqueue. 
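 * (Usage note: either way of finding it below, the call hands back the
 * virtqueue or an ERR_PTR-encoded errno, so the probe keeps the usual
 * shape
 *
 *	vq = vdev->config->find_vq(vdev, 0, random_recv_done);
 *	if (IS_ERR(vq))
 *		return PTR_ERR(vq);
 *
 * before registering with the hwrng core.)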
*/ - vq = virtio_find_single_vq(vdev, random_recv_done, "input"); + vq = vdev->config->find_vq(vdev, 0, random_recv_done); if (IS_ERR(vq)) return PTR_ERR(vq); err = hwrng_register(&virtio_hwrng); if (err) { - vdev->config->del_vqs(vdev); + vdev->config->del_vq(vq); return err; } @@ -120,7 +112,7 @@ static void virtrng_remove(struct virtio_device *vdev) { vdev->config->reset(vdev); hwrng_unregister(&virtio_hwrng); - vdev->config->del_vqs(vdev); + vdev->config->del_vq(vq); } static struct virtio_device_id id_table[] = { diff --git a/trunk/drivers/char/virtio_console.c b/trunk/drivers/char/virtio_console.c index c74dacfa6795..ff6f5a4b58fb 100644 --- a/trunk/drivers/char/virtio_console.c +++ b/trunk/drivers/char/virtio_console.c @@ -188,9 +188,6 @@ static void hvc_handle_input(struct virtqueue *vq) * Finally we put our input buffer in the input queue, ready to receive. */ static int __devinit virtcons_probe(struct virtio_device *dev) { - vq_callback_t *callbacks[] = { hvc_handle_input, NULL}; - const char *names[] = { "input", "output" }; - struct virtqueue *vqs[2]; int err; vdev = dev; @@ -202,15 +199,20 @@ static int __devinit virtcons_probe(struct virtio_device *dev) goto fail; } - /* Find the queues. */ + /* Find the input queue. */ /* FIXME: This is why we want to wean off hvc: we do nothing * when input comes in. */ - err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names); - if (err) + in_vq = vdev->config->find_vq(vdev, 0, hvc_handle_input); + if (IS_ERR(in_vq)) { + err = PTR_ERR(in_vq); goto free; + } - in_vq = vqs[0]; - out_vq = vqs[1]; + out_vq = vdev->config->find_vq(vdev, 1, NULL); + if (IS_ERR(out_vq)) { + err = PTR_ERR(out_vq); + goto free_in_vq; + } /* Start using the new console output. */ virtio_cons.get_chars = get_chars; @@ -231,15 +233,17 @@ static int __devinit virtcons_probe(struct virtio_device *dev) hvc = hvc_alloc(0, 0, &virtio_cons, PAGE_SIZE); if (IS_ERR(hvc)) { err = PTR_ERR(hvc); - goto free_vqs; + goto free_out_vq; } /* Register the input buffer the first time. 
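
The virtio-rng conversion above also changes the buffer protocol: the whole
random_data array is posted in one go, the callback converts the returned
byte count into a count of whole u32 words, and reads pop words off the top
until the buffer is drained, at which point it is re-posted. Condensed
(sketch; locking and the completion are omitted):

static u32 random_data[16];
static unsigned int data_left;			/* unread words */

static void recv_done_sketch(unsigned int len)	/* len is in bytes */
{
	data_left = len / sizeof(random_data[0]);
}

static int data_read_sketch(u32 *out)
{
	if (!data_left)
		return 0;			/* caller waits, retries */
	*out = random_data[--data_left];
	if (!data_left) {
		/* buffer drained: the driver re-posts it here via
		 * register_buffer() before the next read */
	}
	return sizeof(*out);
}
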
*/ add_inbuf(); return 0; -free_vqs: - vdev->config->del_vqs(vdev); +free_out_vq: + vdev->config->del_vq(out_vq); +free_in_vq: + vdev->config->del_vq(in_vq); free: kfree(inbuf); fail: diff --git a/trunk/drivers/ide/at91_ide.c b/trunk/drivers/ide/at91_ide.c index fc0949a8cfde..403d0e4265db 100644 --- a/trunk/drivers/ide/at91_ide.c +++ b/trunk/drivers/ide/at91_ide.c @@ -216,7 +216,6 @@ static const struct ide_port_info at91_ide_port_info __initdata = { .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE | IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS, .pio_mask = ATA_PIO6, - .chipset = ide_generic, }; /* @@ -247,7 +246,8 @@ irqreturn_t at91_irq_handler(int irq, void *dev_id) static int __init at91_ide_probe(struct platform_device *pdev) { int ret; - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw; + hw_regs_t *hws[] = { &hw, NULL, NULL, NULL }; struct ide_host *host; struct resource *res; unsigned long tf_base = 0, ctl_base = 0; @@ -304,9 +304,10 @@ static int __init at91_ide_probe(struct platform_device *pdev) ide_std_init_ports(&hw, tf_base, ctl_base + 6); hw.irq = board->irq_pin; + hw.chipset = ide_generic; hw.dev = &pdev->dev; - host = ide_host_alloc(&at91_ide_port_info, hws, 1); + host = ide_host_alloc(&at91_ide_port_info, hws); if (!host) { perr("failed to allocate ide host\n"); return -ENOMEM; diff --git a/trunk/drivers/ide/au1xxx-ide.c b/trunk/drivers/ide/au1xxx-ide.c index 58121bd6c115..46013644c965 100644 --- a/trunk/drivers/ide/au1xxx-ide.c +++ b/trunk/drivers/ide/au1xxx-ide.c @@ -449,7 +449,7 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d) } #endif -static void auide_setup_ports(struct ide_hw *hw, _auide_hwif *ahwif) +static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif) { int i; unsigned long *ata_regs = hw->io_ports_array; @@ -499,7 +499,6 @@ static const struct ide_port_info au1xxx_port_info = { #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA .mwdma_mask = ATA_MWDMA2, #endif - .chipset = ide_au1xxx, }; static int au_ide_probe(struct platform_device *dev) @@ -508,7 +507,7 @@ static int au_ide_probe(struct platform_device *dev) struct resource *res; struct ide_host *host; int ret = 0; - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) char *mode = "MWDMA2"; @@ -549,8 +548,9 @@ static int au_ide_probe(struct platform_device *dev) auide_setup_ports(&hw, ahwif); hw.irq = ahwif->irq; hw.dev = &dev->dev; + hw.chipset = ide_au1xxx; - ret = ide_host_add(&au1xxx_port_info, hws, 1, &host); + ret = ide_host_add(&au1xxx_port_info, hws, &host); if (ret) goto out; diff --git a/trunk/drivers/ide/buddha.c b/trunk/drivers/ide/buddha.c index e3c6a5913305..d028f8864bc1 100644 --- a/trunk/drivers/ide/buddha.c +++ b/trunk/drivers/ide/buddha.c @@ -121,7 +121,7 @@ static int xsurf_ack_intr(ide_hwif_t *hwif) return 1; } -static void __init buddha_setup_ports(struct ide_hw *hw, unsigned long base, +static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base, unsigned long ctl, unsigned long irq_port, ide_ack_intr_t *ack_intr) { @@ -139,12 +139,13 @@ static void __init buddha_setup_ports(struct ide_hw *hw, unsigned long base, hw->irq = IRQ_AMIGA_PORTS; hw->ack_intr = ack_intr; + + hw->chipset = ide_generic; } static const struct ide_port_info buddha_port_info = { .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, - .chipset = ide_generic, }; /* @@ -160,7 +161,7 @@ static int __init buddha_init(void) while ((z = 
zorro_find_device(ZORRO_WILDCARD, z))) { unsigned long board; - struct ide_hw hw[MAX_NUM_HWIFS], *hws[MAX_NUM_HWIFS]; + hw_regs_t hw[MAX_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL }; if (z->id == ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA) { buddha_num_hwifs = BUDDHA_NUM_HWIFS; @@ -224,7 +225,7 @@ static int __init buddha_init(void) hws[i] = &hw[i]; } - ide_host_add(&buddha_port_info, hws, i, NULL); + ide_host_add(&buddha_port_info, hws, NULL); } return 0; diff --git a/trunk/drivers/ide/cmd640.c b/trunk/drivers/ide/cmd640.c index 1683ed5c7329..8890276fef7f 100644 --- a/trunk/drivers/ide/cmd640.c +++ b/trunk/drivers/ide/cmd640.c @@ -708,7 +708,7 @@ static int __init cmd640x_init(void) int second_port_cmd640 = 0, rc; const char *bus_type, *port2; u8 b, cfr; - struct ide_hw hw[2], *hws[2]; + hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL }; if (cmd640_vlb && probe_for_cmd640_vlb()) { bus_type = "VLB"; @@ -762,9 +762,11 @@ static int __init cmd640x_init(void) ide_std_init_ports(&hw[0], 0x1f0, 0x3f6); hw[0].irq = 14; + hw[0].chipset = ide_cmd640; ide_std_init_ports(&hw[1], 0x170, 0x376); hw[1].irq = 15; + hw[1].chipset = ide_cmd640; printk(KERN_INFO "cmd640: buggy cmd640%c interface on %s, config=0x%02x" "\n", 'a' + cmd640_chip_version - 1, bus_type, cfr); @@ -822,8 +824,7 @@ static int __init cmd640x_init(void) cmd640_dump_regs(); #endif - return ide_host_add(&cmd640_port_info, hws, second_port_cmd640 ? 2 : 1, - NULL); + return ide_host_add(&cmd640_port_info, hws, NULL); } module_param_named(probe_vlb, cmd640_vlb, bool, 0); diff --git a/trunk/drivers/ide/cs5520.c b/trunk/drivers/ide/cs5520.c index bd066bb9d611..87987a7d36c9 100644 --- a/trunk/drivers/ide/cs5520.c +++ b/trunk/drivers/ide/cs5520.c @@ -110,7 +110,7 @@ static const struct ide_port_info cyrix_chipset __devinitdata = { static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id) { const struct ide_port_info *d = &cyrix_chipset; - struct ide_hw hw[2], *hws[] = { NULL, NULL }; + hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL }; ide_setup_pci_noise(dev, d); @@ -136,7 +136,7 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic ide_pci_setup_ports(dev, d, &hw[0], &hws[0]); hw[0].irq = 14; - return ide_host_add(d, hws, 2, NULL); + return ide_host_add(d, hws, NULL); } static const struct pci_device_id cs5520_pci_tbl[] = { diff --git a/trunk/drivers/ide/delkin_cb.c b/trunk/drivers/ide/delkin_cb.c index 1e10eba62ceb..f153b95619bb 100644 --- a/trunk/drivers/ide/delkin_cb.c +++ b/trunk/drivers/ide/delkin_cb.c @@ -68,7 +68,6 @@ static const struct ide_port_info delkin_cb_port_info = { IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, .init_chipset = delkin_cb_init_chipset, - .chipset = ide_pci, }; static int __devinit @@ -77,7 +76,7 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) struct ide_host *host; unsigned long base; int rc; - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; rc = pci_enable_device(dev); if (rc) { @@ -98,8 +97,9 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id) ide_std_init_ports(&hw, base + 0x10, base + 0x1e); hw.irq = dev->irq; hw.dev = &dev->dev; + hw.chipset = ide_pci; /* this enables IRQ sharing */ - rc = ide_host_add(&delkin_cb_port_info, hws, 1, &host); + rc = ide_host_add(&delkin_cb_port_info, hws, &host); if (rc) goto out_disable; diff --git a/trunk/drivers/ide/falconide.c b/trunk/drivers/ide/falconide.c index 22fa27389c3b..0e2df6755ec9 100644 --- 
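
Every host driver in this series follows the same reverted convention: a
fixed array of up to four hw_regs_t pointers, padded with NULLs for unused
ports, with the chipset type carried in each hw_regs_t instead of in the
ide_port_info. ide_host_add() consequently takes no port count and simply
skips NULL slots. A single-port probe then looks like this (sketch mirroring
the delkin_cb/ide-cs hunks; the example_ names are hypothetical):

static const struct ide_port_info example_port_info = {
	.host_flags = IDE_HFLAG_NO_DMA,
};

static int example_probe(void)
{
	hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, 0x1f0, 0x3f6);	/* I/O and ctl bases */
	hw.irq = 14;
	hw.chipset = ide_generic;	/* now set per-hw, not per-port-info */

	/* NULL entries mark unused ports; no count argument anymore */
	return ide_host_add(&example_port_info, hws, NULL);
}
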
a/trunk/drivers/ide/falconide.c +++ b/trunk/drivers/ide/falconide.c @@ -111,10 +111,9 @@ static const struct ide_port_info falconide_port_info = { .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, - .chipset = ide_generic, }; -static void __init falconide_setup_ports(struct ide_hw *hw) +static void __init falconide_setup_ports(hw_regs_t *hw) { int i; @@ -129,6 +128,8 @@ static void __init falconide_setup_ports(struct ide_hw *hw) hw->irq = IRQ_MFP_IDE; hw->ack_intr = NULL; + + hw->chipset = ide_generic; } /* @@ -138,7 +139,7 @@ static void __init falconide_setup_ports(struct ide_hw *hw) static int __init falconide_init(void) { struct ide_host *host; - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; int rc; if (!MACH_IS_ATARI || !ATARIHW_PRESENT(IDE)) @@ -153,7 +154,7 @@ static int __init falconide_init(void) falconide_setup_ports(&hw); - host = ide_host_alloc(&falconide_port_info, hws, 1); + host = ide_host_alloc(&falconide_port_info, hws); if (host == NULL) { rc = -ENOMEM; goto err; diff --git a/trunk/drivers/ide/gayle.c b/trunk/drivers/ide/gayle.c index 4451a6a5dfe0..c7119516c5a7 100644 --- a/trunk/drivers/ide/gayle.c +++ b/trunk/drivers/ide/gayle.c @@ -88,7 +88,7 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif) return 1; } -static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base, +static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base, unsigned long ctl, unsigned long irq_port, ide_ack_intr_t *ack_intr) { @@ -106,13 +106,14 @@ static void __init gayle_setup_ports(struct ide_hw *hw, unsigned long base, hw->irq = IRQ_AMIGA_PORTS; hw->ack_intr = ack_intr; + + hw->chipset = ide_generic; } static const struct ide_port_info gayle_port_info = { .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, - .chipset = ide_generic, }; /* @@ -125,7 +126,7 @@ static int __init gayle_init(void) unsigned long base, ctrlport, irqport; ide_ack_intr_t *ack_intr; int a4000, i, rc; - struct ide_hw hw[GAYLE_NUM_HWIFS], *hws[GAYLE_NUM_HWIFS]; + hw_regs_t hw[GAYLE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL }; if (!MACH_IS_AMIGA) return -ENODEV; @@ -170,7 +171,7 @@ static int __init gayle_init(void) hws[i] = &hw[i]; } - rc = ide_host_add(&gayle_port_info, hws, i, NULL); + rc = ide_host_add(&gayle_port_info, hws, NULL); if (rc) release_mem_region(res_start, res_n); diff --git a/trunk/drivers/ide/hpt366.c b/trunk/drivers/ide/hpt366.c index 7ce68ef6b904..0feb66c720e1 100644 --- a/trunk/drivers/ide/hpt366.c +++ b/trunk/drivers/ide/hpt366.c @@ -138,6 +138,14 @@ #undef HPT_RESET_STATE_ENGINE #undef HPT_DELAY_INTERRUPT +static const char *quirk_drives[] = { + "QUANTUM FIREBALLlct08 08", + "QUANTUM FIREBALLP KA6.4", + "QUANTUM FIREBALLP LM20.4", + "QUANTUM FIREBALLP LM20.5", + NULL +}; + static const char *bad_ata100_5[] = { "IBM-DTLA-307075", "IBM-DTLA-307060", @@ -721,13 +729,27 @@ static void hpt3xx_set_pio_mode(ide_drive_t *drive, const u8 pio) hpt3xx_set_mode(drive, XFER_PIO_0 + pio); } +static void hpt3xx_quirkproc(ide_drive_t *drive) +{ + char *m = (char *)&drive->id[ATA_ID_PROD]; + const char **list = quirk_drives; + + while (*list) + if (strstr(m, *list++)) { + drive->quirk_list = 1; + return; + } + + drive->quirk_list = 0; +} + static void hpt3xx_maskproc(ide_drive_t *drive, int mask) { ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); struct hpt_info *info = hpt3xx_get_info(hwif->dev); - if ((drive->dev_flags & 
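
The quirkproc reintroduced above is a plain substring scan of the drive's
IDENTIFY model string against a NULL-terminated table. The matcher on its
own (standalone sketch with a shortened table):

#include <string.h>

static const char *quirk_table[] = {
	"QUANTUM FIREBALLlct08 08",
	"QUANTUM FIREBALLP KA6.4",
	NULL
};

/* Return nonzero if @model contains any table entry as a substring. */
static int model_is_quirky(const char *model, const char **table)
{
	for (; *table != NULL; table++)
		if (strstr(model, *table) != NULL)
			return 1;
	return 0;
}
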
IDE_DFLAG_NIEN_QUIRK) == 0) + if (drive->quirk_list == 0) return; if (info->chip_type >= HPT370) { @@ -1382,6 +1404,7 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2) static const struct ide_port_ops hpt3xx_port_ops = { .set_pio_mode = hpt3xx_set_pio_mode, .set_dma_mode = hpt3xx_set_mode, + .quirkproc = hpt3xx_quirkproc, .maskproc = hpt3xx_maskproc, .mdma_filter = hpt3xx_mdma_filter, .udma_filter = hpt3xx_udma_filter, diff --git a/trunk/drivers/ide/icside.c b/trunk/drivers/ide/icside.c index 5af3d0ffaf0a..36da913cc553 100644 --- a/trunk/drivers/ide/icside.c +++ b/trunk/drivers/ide/icside.c @@ -65,6 +65,8 @@ static struct cardinfo icside_cardinfo_v6_2 = { }; struct icside_state { + unsigned int channel; + unsigned int enabled; void __iomem *irq_port; void __iomem *ioc_base; unsigned int sel; @@ -114,11 +116,18 @@ static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr) struct icside_state *state = ec->irq_data; void __iomem *base = state->irq_port; - writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1); - readb(base + ICS_ARCIN_V6_INTROFFSET_2); + state->enabled = 1; - writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2); - readb(base + ICS_ARCIN_V6_INTROFFSET_1); + switch (state->channel) { + case 0: + writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1); + readb(base + ICS_ARCIN_V6_INTROFFSET_2); + break; + case 1: + writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2); + readb(base + ICS_ARCIN_V6_INTROFFSET_1); + break; + } } /* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr) @@ -128,6 +137,8 @@ static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr) { struct icside_state *state = ec->irq_data; + state->enabled = 0; + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); } @@ -149,6 +160,44 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = { .irqpending = icside_irqpending_arcin_v6, }; +/* + * Handle routing of interrupts. This is called before + * we write the command to the drive. + */ +static void icside_maskproc(ide_drive_t *drive, int mask) +{ + ide_hwif_t *hwif = drive->hwif; + struct expansion_card *ec = ECARD_DEV(hwif->dev); + struct icside_state *state = ecard_get_drvdata(ec); + unsigned long flags; + + local_irq_save(flags); + + state->channel = hwif->channel; + + if (state->enabled && !mask) { + switch (hwif->channel) { + case 0: + writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); + break; + case 1: + writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); + break; + } + } else { + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); + } + + local_irq_restore(flags); +} + +static const struct ide_port_ops icside_v6_no_dma_port_ops = { + .maskproc = icside_maskproc, +}; + #ifdef CONFIG_BLK_DEV_IDEDMA_ICS /* * SG-DMA support. @@ -226,6 +275,7 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode) static const struct ide_port_ops icside_v6_port_ops = { .set_dma_mode = icside_set_dma_mode, + .maskproc = icside_maskproc, }; static void icside_dma_host_set(ide_drive_t *drive, int on) @@ -269,6 +319,11 @@ static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd) */ BUG_ON(dma_channel_active(ec->dma)); + /* + * Ensure that we have the right interrupt routed. + */ + icside_maskproc(drive, 0); + /* * Route the DMA signals to the correct interface. 
*/ @@ -326,7 +381,7 @@ static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d) return -EOPNOTSUPP; } -static void icside_setup_ports(struct ide_hw *hw, void __iomem *base, +static void icside_setup_ports(hw_regs_t *hw, void __iomem *base, struct cardinfo *info, struct expansion_card *ec) { unsigned long port = (unsigned long)base + info->dataoffset; @@ -343,11 +398,11 @@ static void icside_setup_ports(struct ide_hw *hw, void __iomem *base, hw->irq = ec->irq; hw->dev = &ec->dev; + hw->chipset = ide_acorn; } static const struct ide_port_info icside_v5_port_info = { .host_flags = IDE_HFLAG_NO_DMA, - .chipset = ide_acorn, }; static int __devinit @@ -355,7 +410,7 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec) { void __iomem *base; struct ide_host *host; - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; int ret; base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0); @@ -376,7 +431,7 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec) icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec); - host = ide_host_alloc(&icside_v5_port_info, hws, 1); + host = ide_host_alloc(&icside_v5_port_info, hws); if (host == NULL) return -ENODEV; @@ -397,11 +452,11 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec) static const struct ide_port_info icside_v6_port_info __initdata = { .init_dma = icside_dma_off_init, + .port_ops = &icside_v6_no_dma_port_ops, .dma_ops = &icside_v6_dma_ops, .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, .mwdma_mask = ATA_MWDMA2, .swdma_mask = ATA_SWDMA2, - .chipset = ide_acorn, }; static int __devinit @@ -411,7 +466,7 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) struct ide_host *host; unsigned int sel = 0; int ret; - struct ide_hw hw[2], *hws[] = { &hw[0], &hw[1] }; + hw_regs_t hw[2], *hws[] = { &hw[0], &hw[1], NULL, NULL }; struct ide_port_info d = icside_v6_port_info; ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0); @@ -451,7 +506,7 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec) icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec); icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec); - host = ide_host_alloc(&d, hws, 2); + host = ide_host_alloc(&d, hws); if (host == NULL) return -ENODEV; diff --git a/trunk/drivers/ide/ide-4drives.c b/trunk/drivers/ide/ide-4drives.c index 979d342c338a..78aca75a2c48 100644 --- a/trunk/drivers/ide/ide-4drives.c +++ b/trunk/drivers/ide/ide-4drives.c @@ -25,13 +25,12 @@ static const struct ide_port_info ide_4drives_port_info = { .port_ops = &ide_4drives_port_ops, .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA | IDE_HFLAG_4DRIVES, - .chipset = ide_4drives, }; static int __init ide_4drives_init(void) { unsigned long base = 0x1f0, ctl = 0x3f6; - struct ide_hw hw, *hws[] = { &hw, &hw }; + hw_regs_t hw, *hws[] = { &hw, &hw, NULL, NULL }; if (probe_4drives == 0) return -ENODEV; @@ -53,8 +52,9 @@ static int __init ide_4drives_init(void) ide_std_init_ports(&hw, base, ctl); hw.irq = 14; + hw.chipset = ide_4drives; - return ide_host_add(&ide_4drives_port_info, hws, 2, NULL); + return ide_host_add(&ide_4drives_port_info, hws, NULL); } module_init(ide_4drives_init); diff --git a/trunk/drivers/ide/ide-atapi.c b/trunk/drivers/ide/ide-atapi.c index bbdd2547f12a..757e5956b132 100644 --- a/trunk/drivers/ide/ide-atapi.c +++ b/trunk/drivers/ide/ide-atapi.c @@ -259,7 +259,7 @@ void ide_retry_pc(ide_drive_t *drive) pc->req_xfer = 
blk_rq_bytes(sense_rq); if (drive->media == ide_tape) - drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC; + set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); /* * Push back the failed request and put request sense on top diff --git a/trunk/drivers/ide/ide-cs.c b/trunk/drivers/ide/ide-cs.c index 527908ff298c..9e47f3529d55 100644 --- a/trunk/drivers/ide/ide-cs.c +++ b/trunk/drivers/ide/ide-cs.c @@ -155,7 +155,6 @@ static const struct ide_port_info idecs_port_info = { .port_ops = &idecs_port_ops, .host_flags = IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, - .chipset = ide_pci, }; static struct ide_host *idecs_register(unsigned long io, unsigned long ctl, @@ -164,7 +163,7 @@ static struct ide_host *idecs_register(unsigned long io, unsigned long ctl, struct ide_host *host; ide_hwif_t *hwif; int i, rc; - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; if (!request_region(io, 8, DRV_NAME)) { printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n", @@ -182,9 +181,10 @@ static struct ide_host *idecs_register(unsigned long io, unsigned long ctl, memset(&hw, 0, sizeof(hw)); ide_std_init_ports(&hw, io, ctl); hw.irq = irq; + hw.chipset = ide_pci; hw.dev = &handle->dev; - rc = ide_host_add(&idecs_port_info, hws, 1, &host); + rc = ide_host_add(&idecs_port_info, hws, &host); if (rc) goto out_release; diff --git a/trunk/drivers/ide/ide-disk.c b/trunk/drivers/ide/ide-disk.c index 6a1de2169709..c6f7fcfb9d67 100644 --- a/trunk/drivers/ide/ide-disk.c +++ b/trunk/drivers/ide/ide-disk.c @@ -302,12 +302,14 @@ static const struct drive_list_entry hpa_list[] = { { NULL, NULL } }; -static u64 ide_disk_hpa_get_native_capacity(ide_drive_t *drive, int lba48) +static void idedisk_check_hpa(ide_drive_t *drive) { - u64 capacity, set_max; + unsigned long long capacity, set_max; + int lba48 = ata_id_lba48_enabled(drive->id); capacity = drive->capacity64; - set_max = idedisk_read_native_max_address(drive, lba48); + + set_max = idedisk_read_native_max_address(drive, lba48); if (ide_in_drive_list(drive->id, hpa_list)) { /* @@ -318,31 +320,9 @@ static u64 ide_disk_hpa_get_native_capacity(ide_drive_t *drive, int lba48) set_max--; } - return set_max; -} - -static u64 ide_disk_hpa_set_capacity(ide_drive_t *drive, u64 set_max, int lba48) -{ - set_max = idedisk_set_max_address(drive, set_max, lba48); - if (set_max) - drive->capacity64 = set_max; - - return set_max; -} - -static void idedisk_check_hpa(ide_drive_t *drive) -{ - u64 capacity, set_max; - int lba48 = ata_id_lba48_enabled(drive->id); - - capacity = drive->capacity64; - set_max = ide_disk_hpa_get_native_capacity(drive, lba48); - if (set_max <= capacity) return; - drive->probed_capacity = set_max; - printk(KERN_INFO "%s: Host Protected Area detected.\n" "\tcurrent capacity is %llu sectors (%llu MB)\n" "\tnative capacity is %llu sectors (%llu MB)\n", @@ -350,13 +330,13 @@ static void idedisk_check_hpa(ide_drive_t *drive) capacity, sectors_to_MB(capacity), set_max, sectors_to_MB(set_max)); - if ((drive->dev_flags & IDE_DFLAG_NOHPA) == 0) - return; + set_max = idedisk_set_max_address(drive, set_max, lba48); - set_max = ide_disk_hpa_set_capacity(drive, set_max, lba48); - if (set_max) + if (set_max) { + drive->capacity64 = set_max; printk(KERN_INFO "%s: Host Protected Area disabled.\n", drive->name); + } } static int ide_disk_get_capacity(ide_drive_t *drive) @@ -378,8 +358,6 @@ static int ide_disk_get_capacity(ide_drive_t *drive) drive->capacity64 = drive->cyl * drive->head * drive->sect; } - drive->probed_capacity = drive->capacity64; - if 
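
idedisk_check_hpa() above is the ATA Host Protected Area handshake in one
place: read the native capacity with READ NATIVE MAX ADDRESS (which the spec
requires immediately before SET MAX), and if it exceeds the reported
capacity, attempt to unlock the full device with SET MAX ADDRESS. Its
control flow, condensed (sketch reusing the helper names from this file):

	u64 capacity = drive->capacity64;
	int lba48 = ata_id_lba48_enabled(drive->id);
	u64 native = idedisk_read_native_max_address(drive, lba48);

	if (native <= capacity)
		return;				/* no HPA in effect */

	/* nonzero return means the drive accepted the new max address */
	native = idedisk_set_max_address(drive, native, lba48);
	if (native)
		drive->capacity64 = native;	/* HPA disabled */
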
(lba) { drive->dev_flags |= IDE_DFLAG_LBA; @@ -398,7 +376,7 @@ static int ide_disk_get_capacity(ide_drive_t *drive) "%llu sectors (%llu MB)\n", drive->name, (unsigned long long)drive->capacity64, sectors_to_MB(drive->capacity64)); - drive->probed_capacity = drive->capacity64 = 1ULL << 28; + drive->capacity64 = 1ULL << 28; } if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && @@ -414,34 +392,6 @@ static int ide_disk_get_capacity(ide_drive_t *drive) return 0; } -static u64 ide_disk_set_capacity(ide_drive_t *drive, u64 capacity) -{ - u64 set = min(capacity, drive->probed_capacity); - u16 *id = drive->id; - int lba48 = ata_id_lba48_enabled(id); - - if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 || - ata_id_hpa_enabled(id) == 0) - goto out; - - /* - * according to the spec the SET MAX ADDRESS command shall be - * immediately preceded by a READ NATIVE MAX ADDRESS command - */ - capacity = ide_disk_hpa_get_native_capacity(drive, lba48); - if (capacity == 0) - goto out; - - set = ide_disk_hpa_set_capacity(drive, set, lba48); - if (set) { - /* needed for ->resume to disable HPA */ - drive->dev_flags |= IDE_DFLAG_NOHPA; - return set; - } -out: - return drive->capacity64; -} - static void idedisk_prepare_flush(struct request_queue *q, struct request *rq) { ide_drive_t *drive = q->queuedata; @@ -478,14 +428,14 @@ static int set_multcount(ide_drive_t *drive, int arg) if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff)) return -EINVAL; - if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) + if (drive->special.b.set_multmode) return -EBUSY; rq = blk_get_request(drive->queue, READ, __GFP_WAIT); rq->cmd_type = REQ_TYPE_ATA_TASKFILE; drive->mult_req = arg; - drive->special_flags |= IDE_SFLAG_SET_MULTMODE; + drive->special.b.set_multmode = 1; error = blk_execute_rq(drive->queue, NULL, rq, 0); blk_put_request(rq); @@ -790,7 +740,6 @@ static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk, const struct ide_disk_ops ide_ata_disk_ops = { .check = ide_disk_check, - .set_capacity = ide_disk_set_capacity, .get_capacity = ide_disk_get_capacity, .setup = ide_disk_setup, .flush = ide_disk_flush, diff --git a/trunk/drivers/ide/ide-dma.c b/trunk/drivers/ide/ide-dma.c index 219e6fb78dc6..001f68f0bb28 100644 --- a/trunk/drivers/ide/ide-dma.c +++ b/trunk/drivers/ide/ide-dma.c @@ -347,6 +347,7 @@ u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode) return mode; } +EXPORT_SYMBOL_GPL(ide_find_dma_mode); static int ide_tune_dma(ide_drive_t *drive) { diff --git a/trunk/drivers/ide/ide-eh.c b/trunk/drivers/ide/ide-eh.c index 2b9141979613..5d5fb961b5ce 100644 --- a/trunk/drivers/ide/ide-eh.c +++ b/trunk/drivers/ide/ide-eh.c @@ -52,7 +52,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, } if ((rq->errors & ERROR_RECAL) == ERROR_RECAL) - drive->special_flags |= IDE_SFLAG_RECALIBRATE; + drive->special.b.recalibrate = 1; ++rq->errors; @@ -268,8 +268,9 @@ static void ide_disk_pre_reset(ide_drive_t *drive) { int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1; - drive->special_flags = - legacy ? 
(IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE) : 0; + drive->special.all = 0; + drive->special.b.set_geometry = legacy; + drive->special.b.recalibrate = legacy; drive->mult_count = 0; drive->dev_flags &= ~IDE_DFLAG_PARKED; @@ -279,7 +280,7 @@ static void ide_disk_pre_reset(ide_drive_t *drive) drive->mult_req = 0; if (drive->mult_req != drive->mult_count) - drive->special_flags |= IDE_SFLAG_SET_MULTMODE; + drive->special.b.set_multmode = 1; } static void pre_reset(ide_drive_t *drive) @@ -407,9 +408,8 @@ static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi) /* more than enough time */ udelay(10); /* clear SRST, leave nIEN (unless device is on the quirk list) */ - tp_ops->write_devctl(hwif, - ((drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) ? 0 : ATA_NIEN) | - ATA_DEVCTL_OBS); + tp_ops->write_devctl(hwif, (drive->quirk_list == 2 ? 0 : ATA_NIEN) | + ATA_DEVCTL_OBS); /* more than enough time */ udelay(10); hwif->poll_timeout = jiffies + WAIT_WORSTCASE; diff --git a/trunk/drivers/ide/ide-gd.c b/trunk/drivers/ide/ide-gd.c index 214119026b3f..4b6b71e2cdf5 100644 --- a/trunk/drivers/ide/ide-gd.c +++ b/trunk/drivers/ide/ide-gd.c @@ -287,19 +287,6 @@ static int ide_gd_media_changed(struct gendisk *disk) return ret; } -static unsigned long long ide_gd_set_capacity(struct gendisk *disk, - unsigned long long capacity) -{ - struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); - ide_drive_t *drive = idkp->drive; - const struct ide_disk_ops *disk_ops = drive->disk_ops; - - if (disk_ops->set_capacity) - return disk_ops->set_capacity(drive, capacity); - - return drive->capacity64; -} - static int ide_gd_revalidate_disk(struct gendisk *disk) { struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); @@ -328,7 +315,6 @@ static struct block_device_operations ide_gd_ops = { .locked_ioctl = ide_gd_ioctl, .getgeo = ide_gd_getgeo, .media_changed = ide_gd_media_changed, - .set_capacity = ide_gd_set_capacity, .revalidate_disk = ide_gd_revalidate_disk }; diff --git a/trunk/drivers/ide/ide-generic.c b/trunk/drivers/ide/ide-generic.c index 54d7c4685d23..7812ca0be13b 100644 --- a/trunk/drivers/ide/ide-generic.c +++ b/trunk/drivers/ide/ide-generic.c @@ -29,7 +29,6 @@ MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports"); static const struct ide_port_info ide_generic_port_info = { .host_flags = IDE_HFLAG_NO_DMA, - .chipset = ide_generic, }; #ifdef CONFIG_ARM @@ -86,7 +85,7 @@ static void ide_generic_check_pci_legacy_iobases(int *primary, int *secondary) static int __init ide_generic_init(void) { - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; unsigned long io_addr; int i, rc = 0, primary = 0, secondary = 0; @@ -133,7 +132,9 @@ static int __init ide_generic_init(void) #else hw.irq = legacy_irqs[i]; #endif - rc = ide_host_add(&ide_generic_port_info, hws, 1, NULL); + hw.chipset = ide_generic; + + rc = ide_host_add(&ide_generic_port_info, hws, NULL); if (rc) { release_region(io_addr + 0x206, 1); release_region(io_addr, 8); diff --git a/trunk/drivers/ide/ide-h8300.c b/trunk/drivers/ide/ide-h8300.c index 520f42c5445a..c06ebdc4a130 100644 --- a/trunk/drivers/ide/ide-h8300.c +++ b/trunk/drivers/ide/ide-h8300.c @@ -64,26 +64,26 @@ static const struct ide_tp_ops h8300_tp_ops = { #define H8300_IDE_GAP (2) -static inline void hw_setup(struct ide_hw *hw) +static inline void hw_setup(hw_regs_t *hw) { int i; - memset(hw, 0, sizeof(*hw)); + memset(hw, 0, sizeof(hw_regs_t)); for (i = 0; i <= 7; i++) hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + 
H8300_IDE_GAP*i; hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT; hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ; + hw->chipset = ide_generic; } static const struct ide_port_info h8300_port_info = { .tp_ops = &h8300_tp_ops, .host_flags = IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_NO_DMA, - .chipset = ide_generic, }; static int __init h8300_ide_init(void) { - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; printk(KERN_INFO DRV_NAME ": H8/300 generic IDE interface\n"); @@ -96,7 +96,7 @@ static int __init h8300_ide_init(void) hw_setup(&hw); - return ide_host_add(&h8300_port_info, hws, 1, NULL); + return ide_host_add(&h8300_port_info, hws, NULL); out_busy: printk(KERN_ERR "ide-h8300: IDE I/F resource already used.\n"); diff --git a/trunk/drivers/ide/ide-io.c b/trunk/drivers/ide/ide-io.c index 272cc38f6dbe..bba4297f2f03 100644 --- a/trunk/drivers/ide/ide-io.c +++ b/trunk/drivers/ide/ide-io.c @@ -184,42 +184,29 @@ static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf) tf->command = ATA_CMD_SET_MULTI; } -/** - * do_special - issue some special commands - * @drive: drive the command is for - * - * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS, - * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive. - */ - -static ide_startstop_t do_special(ide_drive_t *drive) +static ide_startstop_t ide_disk_special(ide_drive_t *drive) { + special_t *s = &drive->special; struct ide_cmd cmd; -#ifdef DEBUG - printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__, - drive->special_flags); -#endif - if (drive->media != ide_disk) { - drive->special_flags = 0; - drive->mult_req = 0; - return ide_stopped; - } - memset(&cmd, 0, sizeof(cmd)); cmd.protocol = ATA_PROT_NODATA; - if (drive->special_flags & IDE_SFLAG_SET_GEOMETRY) { - drive->special_flags &= ~IDE_SFLAG_SET_GEOMETRY; + if (s->b.set_geometry) { + s->b.set_geometry = 0; ide_tf_set_specify_cmd(drive, &cmd.tf); - } else if (drive->special_flags & IDE_SFLAG_RECALIBRATE) { - drive->special_flags &= ~IDE_SFLAG_RECALIBRATE; + } else if (s->b.recalibrate) { + s->b.recalibrate = 0; ide_tf_set_restore_cmd(drive, &cmd.tf); - } else if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) { - drive->special_flags &= ~IDE_SFLAG_SET_MULTMODE; + } else if (s->b.set_multmode) { + s->b.set_multmode = 0; ide_tf_set_setmult_cmd(drive, &cmd.tf); - } else - BUG(); + } else if (s->all) { + int special = s->all; + s->all = 0; + printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special); + return ide_stopped; + } cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; @@ -230,6 +217,31 @@ static ide_startstop_t do_special(ide_drive_t *drive) return ide_started; } +/** + * do_special - issue some special commands + * @drive: drive the command is for + * + * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS, + * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive. + * + * It used to do much more, but has been scaled back. 
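
The special.b accessors restored throughout this file come from the old
special_t type: a one-byte union whose .all view lets the request path ask
"is any special command pending?" in a single test, while the issue path
clears individual bits. Its rough shape, reconstructed from how this patch
uses it (sketch, not a verbatim copy of <linux/ide.h>):

typedef union {
	unsigned all : 8;			/* anything pending at all? */
	struct {
		unsigned set_geometry	: 1;	/* ATA_CMD_INIT_DEV_PARAMS */
		unsigned recalibrate	: 1;	/* ATA_CMD_RESTORE */
		unsigned set_multmode	: 1;	/* ATA_CMD_SET_MULTI */
		unsigned reserved	: 5;
	} b;
} special_t;
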
+ */ + +static ide_startstop_t do_special (ide_drive_t *drive) +{ + special_t *s = &drive->special; + +#ifdef DEBUG + printk("%s: do_special: 0x%02x\n", drive->name, s->all); +#endif + if (drive->media == ide_disk) + return ide_disk_special(drive); + + s->all = 0; + drive->mult_req = 0; + return ide_stopped; +} + void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd) { ide_hwif_t *hwif = drive->hwif; @@ -339,8 +351,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) printk(KERN_ERR "%s: drive not ready for command\n", drive->name); return startstop; } - - if (drive->special_flags == 0) { + if (!drive->special.all) { struct ide_driver *drv; /* @@ -488,15 +499,11 @@ void do_ide_request(struct request_queue *q) if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) && hwif != prev_port) { - ide_drive_t *cur_dev = - prev_port ? prev_port->cur_dev : NULL; - /* * set nIEN for previous port, drives in the - * quirk list may not like intr setups/cleanups + * quirk_list may not like intr setups/cleanups */ - if (cur_dev && - (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0) + if (prev_port && prev_port->cur_dev->quirk_list == 0) prev_port->tp_ops->write_devctl(prev_port, ATA_NIEN | ATA_DEVCTL_OBS); diff --git a/trunk/drivers/ide/ide-iops.c b/trunk/drivers/ide/ide-iops.c index fa047150a1c6..06fe002116ec 100644 --- a/trunk/drivers/ide/ide-iops.c +++ b/trunk/drivers/ide/ide-iops.c @@ -282,29 +282,6 @@ u8 eighty_ninty_three(ide_drive_t *drive) return 0; } -static const char *nien_quirk_list[] = { - "QUANTUM FIREBALLlct08 08", - "QUANTUM FIREBALLP KA6.4", - "QUANTUM FIREBALLP KA9.1", - "QUANTUM FIREBALLP KX13.6", - "QUANTUM FIREBALLP KX20.5", - "QUANTUM FIREBALLP KX27.3", - "QUANTUM FIREBALLP LM20.4", - "QUANTUM FIREBALLP LM20.5", - NULL -}; - -void ide_check_nien_quirk_list(ide_drive_t *drive) -{ - const char **list, *m = (char *)&drive->id[ATA_ID_PROD]; - - for (list = nien_quirk_list; *list != NULL; list++) - if (strstr(m, *list) != NULL) { - drive->dev_flags |= IDE_DFLAG_NIEN_QUIRK; - return; - } -} - int ide_driveid_update(ide_drive_t *drive) { u16 *id; @@ -334,6 +311,7 @@ int ide_driveid_update(ide_drive_t *drive) return 1; out_err: + SELECT_MASK(drive, 0); if (rc == 2) printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__); kfree(id); @@ -387,7 +365,7 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed) tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES); - if (drive->dev_flags & IDE_DFLAG_NIEN_QUIRK) + if (drive->quirk_list == 2) tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); error = __ide_wait_stat(drive, drive->ready_stat, diff --git a/trunk/drivers/ide/ide-legacy.c b/trunk/drivers/ide/ide-legacy.c index b9654a7bb7be..8c5dcbf22547 100644 --- a/trunk/drivers/ide/ide-legacy.c +++ b/trunk/drivers/ide/ide-legacy.c @@ -1,7 +1,7 @@ #include #include -static void ide_legacy_init_one(struct ide_hw **hws, struct ide_hw *hw, +static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw, u8 port_no, const struct ide_port_info *d, unsigned long config) { @@ -33,6 +33,7 @@ static void ide_legacy_init_one(struct ide_hw **hws, struct ide_hw *hw, ide_std_init_ports(hw, base, ctl); hw->irq = irq; + hw->chipset = d->chipset; hw->config = config; hws[port_no] = hw; @@ -40,7 +41,7 @@ static void ide_legacy_init_one(struct ide_hw **hws, struct ide_hw *hw, int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config) { - struct ide_hw hw[2], *hws[] = { NULL, NULL }; + hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL }; memset(&hw, 0, sizeof(hw)); @@ -52,6 
+53,6 @@ int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config) (d->host_flags & IDE_HFLAG_SINGLE)) return -ENOENT; - return ide_host_add(d, hws, 2, NULL); + return ide_host_add(d, hws, NULL); } EXPORT_SYMBOL_GPL(ide_legacy_device_add); diff --git a/trunk/drivers/ide/ide-pnp.c b/trunk/drivers/ide/ide-pnp.c index 017b1df3b805..6e80b774e88a 100644 --- a/trunk/drivers/ide/ide-pnp.c +++ b/trunk/drivers/ide/ide-pnp.c @@ -29,7 +29,6 @@ static struct pnp_device_id idepnp_devices[] = { static const struct ide_port_info ide_pnp_port_info = { .host_flags = IDE_HFLAG_NO_DMA, - .chipset = ide_generic, }; static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) @@ -37,7 +36,7 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) struct ide_host *host; unsigned long base, ctl; int rc; - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n"); @@ -63,8 +62,9 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) memset(&hw, 0, sizeof(hw)); ide_std_init_ports(&hw, base, ctl); hw.irq = pnp_irq(dev, 0); + hw.chipset = ide_generic; - rc = ide_host_add(&ide_pnp_port_info, hws, 1, &host); + rc = ide_host_add(&ide_pnp_port_info, hws, &host); if (rc) goto out; diff --git a/trunk/drivers/ide/ide-probe.c b/trunk/drivers/ide/ide-probe.c index f371b0de314f..c895ed52b2e8 100644 --- a/trunk/drivers/ide/ide-probe.c +++ b/trunk/drivers/ide/ide-probe.c @@ -97,7 +97,7 @@ static void ide_disk_init_mult_count(ide_drive_t *drive) drive->mult_req = id[ATA_ID_MULTSECT] & 0xff; if (drive->mult_req) - drive->special_flags |= IDE_SFLAG_SET_MULTMODE; + drive->special.b.set_multmode = 1; } } @@ -465,8 +465,23 @@ static u8 probe_for_drive(ide_drive_t *drive) int rc; u8 cmd; + /* + * In order to keep things simple we have an id + * block for all drives at all times. If the device + * is pre ATA or refuses ATA/ATAPI identify we + * will add faked data to this. + * + * Also note that 0 everywhere means "can't do X" + */ + drive->dev_flags &= ~IDE_DFLAG_ID_READ; + drive->id = kzalloc(SECTOR_SIZE, GFP_KERNEL); + if (drive->id == NULL) { + printk(KERN_ERR "ide: out of memory for id data.\n"); + return 0; + } + m = (char *)&drive->id[ATA_ID_PROD]; strcpy(m, "UNKNOWN"); @@ -482,7 +497,7 @@ static u8 probe_for_drive(ide_drive_t *drive) } if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) - return 0; + goto out_free; /* identification failed? */ if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) { @@ -506,7 +521,7 @@ static u8 probe_for_drive(ide_drive_t *drive) } if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) - return 0; + goto out_free; /* The drive wasn't being helpful. 
Add generic info only */ if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) { @@ -520,6 +535,9 @@ static u8 probe_for_drive(ide_drive_t *drive) } return 1; +out_free: + kfree(drive->id); + return 0; } static void hwif_release_dev(struct device *dev) @@ -684,14 +702,8 @@ static int ide_probe_port(ide_hwif_t *hwif) if (irqd) disable_irq(hwif->irq); - rc = ide_port_wait_ready(hwif); - if (rc == -ENODEV) { - printk(KERN_INFO "%s: no devices on the port\n", hwif->name); - goto out; - } else if (rc == -EBUSY) - printk(KERN_ERR "%s: not ready before the probe\n", hwif->name); - else - rc = -ENODEV; + if (ide_port_wait_ready(hwif) == -EBUSY) + printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name); /* * Second drive should only exist if first drive was found, @@ -702,7 +714,7 @@ static int ide_probe_port(ide_hwif_t *hwif) if (drive->dev_flags & IDE_DFLAG_PRESENT) rc = 0; } -out: + /* * Use cached IRQ number. It might be (and is...) changed by probe * code above @@ -720,8 +732,6 @@ static void ide_port_tune_devices(ide_hwif_t *hwif) int i; ide_port_for_each_present_dev(i, drive, hwif) { - ide_check_nien_quirk_list(drive); - if (port_ops && port_ops->quirkproc) port_ops->quirkproc(drive); } @@ -807,6 +817,8 @@ static int ide_port_setup_devices(ide_hwif_t *hwif) if (ide_init_queue(drive)) { printk(KERN_ERR "ide: failed to init %s\n", drive->name); + kfree(drive->id); + drive->id = NULL; drive->dev_flags &= ~IDE_DFLAG_PRESENT; continue; } @@ -935,6 +947,9 @@ static void drive_release_dev (struct device *dev) blk_cleanup_queue(drive->queue); drive->queue = NULL; + kfree(drive->id); + drive->id = NULL; + drive->dev_flags &= ~IDE_DFLAG_PRESENT; complete(&drive->gendev_rel_comp); @@ -1020,15 +1035,6 @@ static void ide_port_init_devices(ide_hwif_t *hwif) if (port_ops && port_ops->init_dev) port_ops->init_dev(drive); } - - ide_port_for_each_dev(i, drive, hwif) { - /* - * default to PIO Mode 0 before we figure out - * the most suited mode for the attached device - */ - if (port_ops && port_ops->set_pio_mode) - port_ops->set_pio_mode(drive, 0); - } } static void ide_init_port(ide_hwif_t *hwif, unsigned int port, @@ -1036,7 +1042,8 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port, { hwif->channel = port; - hwif->chipset = d->chipset ? d->chipset : ide_pci; + if (d->chipset) + hwif->chipset = d->chipset; if (d->init_iops) d->init_iops(hwif); @@ -1117,19 +1124,16 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif) ide_port_for_each_dev(i, drive, hwif) { u8 j = (hwif->index * MAX_DRIVES) + i; - u16 *saved_id = drive->id; memset(drive, 0, sizeof(*drive)); - memset(saved_id, 0, SECTOR_SIZE); - drive->id = saved_id; drive->media = ide_disk; drive->select = (i << 4) | ATA_DEVICE_OBS; drive->hwif = hwif; drive->ready_stat = ATA_DRDY; drive->bad_wstat = BAD_W_STAT; - drive->special_flags = IDE_SFLAG_RECALIBRATE | - IDE_SFLAG_SET_GEOMETRY; + drive->special.b.recalibrate = 1; + drive->special.b.set_geometry = 1; drive->name[0] = 'h'; drive->name[1] = 'd'; drive->name[2] = 'a' + j; @@ -1164,10 +1168,11 @@ static void ide_init_port_data(ide_hwif_t *hwif, unsigned int index) ide_port_init_devices_data(hwif); } -static void ide_init_port_hw(ide_hwif_t *hwif, struct ide_hw *hw) +static void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw) { memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports)); hwif->irq = hw->irq; + hwif->chipset = hw->chipset; hwif->dev = hw->dev; hwif->gendev.parent = hw->parent ? 
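
probe_for_drive() now owns the lifetime of the per-drive identify buffer: it
is allocated at the start of each probe, filled with faked data when a
device will not identify, freed on every failure path above, and freed again
in drive_release_dev() at teardown. The allocation/unwind skeleton
(condensed sketch):

	drive->id = kzalloc(SECTOR_SIZE, GFP_KERNEL);
	if (drive->id == NULL)
		return 0;			/* treated as "no drive" */

	if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
		goto out_free;			/* nothing answered */

	return 1;				/* present; id stays live */

out_free:
	kfree(drive->id);
	return 0;
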
hw->parent : hw->dev; hwif->ack_intr = hw->ack_intr; @@ -1228,10 +1233,8 @@ static void ide_port_free_devices(ide_hwif_t *hwif) ide_drive_t *drive; int i; - ide_port_for_each_dev(i, drive, hwif) { - kfree(drive->id); + ide_port_for_each_dev(i, drive, hwif) kfree(drive); - } } static int ide_port_alloc_devices(ide_hwif_t *hwif, int node) @@ -1245,18 +1248,6 @@ static int ide_port_alloc_devices(ide_hwif_t *hwif, int node) if (drive == NULL) goto out_nomem; - /* - * In order to keep things simple we have an id - * block for all drives at all times. If the device - * is pre ATA or refuses ATA/ATAPI identify we - * will add faked data to this. - * - * Also note that 0 everywhere means "can't do X" - */ - drive->id = kzalloc_node(SECTOR_SIZE, GFP_KERNEL, node); - if (drive->id == NULL) - goto out_nomem; - hwif->devices[i] = drive; } return 0; @@ -1266,8 +1257,7 @@ static int ide_port_alloc_devices(ide_hwif_t *hwif, int node) return -ENOMEM; } -struct ide_host *ide_host_alloc(const struct ide_port_info *d, - struct ide_hw **hws, unsigned int n_ports) +struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws) { struct ide_host *host; struct device *dev = hws[0] ? hws[0]->dev : NULL; @@ -1278,7 +1268,7 @@ struct ide_host *ide_host_alloc(const struct ide_port_info *d, if (host == NULL) return NULL; - for (i = 0; i < n_ports; i++) { + for (i = 0; i < MAX_HOST_PORTS; i++) { ide_hwif_t *hwif; int idx; @@ -1298,7 +1288,6 @@ struct ide_host *ide_host_alloc(const struct ide_port_info *d, if (idx < 0) { printk(KERN_ERR "%s: no free slot for interface\n", d ? d->name : "ide"); - ide_port_free_devices(hwif); kfree(hwif); continue; } @@ -1355,7 +1344,7 @@ static void ide_disable_port(ide_hwif_t *hwif) } int ide_host_register(struct ide_host *host, const struct ide_port_info *d, - struct ide_hw **hws) + hw_regs_t **hws) { ide_hwif_t *hwif, *mate = NULL; int i, j = 0; @@ -1449,13 +1438,13 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d, } EXPORT_SYMBOL_GPL(ide_host_register); -int ide_host_add(const struct ide_port_info *d, struct ide_hw **hws, - unsigned int n_ports, struct ide_host **hostp) +int ide_host_add(const struct ide_port_info *d, hw_regs_t **hws, + struct ide_host **hostp) { struct ide_host *host; int rc; - host = ide_host_alloc(d, hws, n_ports); + host = ide_host_alloc(d, hws); if (host == NULL) return -ENOMEM; diff --git a/trunk/drivers/ide/ide-tape.c b/trunk/drivers/ide/ide-tape.c index 4b447a8a49d4..d9764f0bc82f 100644 --- a/trunk/drivers/ide/ide-tape.c +++ b/trunk/drivers/ide/ide-tape.c @@ -240,27 +240,18 @@ static struct class *idetape_sysfs_class; static void ide_tape_release(struct device *); -static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES]; - -static struct ide_tape_obj *ide_tape_get(struct gendisk *disk, bool cdev, - unsigned int i) +static struct ide_tape_obj *ide_tape_get(struct gendisk *disk) { struct ide_tape_obj *tape = NULL; mutex_lock(&idetape_ref_mutex); - - if (cdev) - tape = idetape_devs[i]; - else - tape = ide_drv_g(disk, ide_tape_obj); - + tape = ide_drv_g(disk, ide_tape_obj); if (tape) { if (ide_device_get(tape->drive)) tape = NULL; else get_device(&tape->dev); } - mutex_unlock(&idetape_ref_mutex); return tape; } @@ -275,6 +266,24 @@ static void ide_tape_put(struct ide_tape_obj *tape) mutex_unlock(&idetape_ref_mutex); } +/* + * The variables below are used for the character device interface. Additional + * state variables are defined in our ide_drive_t structure. 
+ */ +static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES]; + +static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i) +{ + struct ide_tape_obj *tape = NULL; + + mutex_lock(&idetape_ref_mutex); + tape = idetape_devs[i]; + if (tape) + get_device(&tape->dev); + mutex_unlock(&idetape_ref_mutex); + return tape; +} + /* * called on each failed packet command retry to analyze the request sense. We * currently do not utilize this information. @@ -388,8 +397,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc) if (readpos[0] & 0x4) { printk(KERN_INFO "ide-tape: Block location is unknown" "to the tape\n"); - clear_bit(ilog2(IDE_AFLAG_ADDRESS_VALID), - &drive->atapi_flags); + clear_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags); uptodate = 0; err = IDE_DRV_ERROR_GENERAL; } else { @@ -398,8 +406,7 @@ static int ide_tape_callback(ide_drive_t *drive, int dsc) tape->partition = readpos[1]; tape->first_frame = be32_to_cpup((__be32 *)&readpos[4]); - set_bit(ilog2(IDE_AFLAG_ADDRESS_VALID), - &drive->atapi_flags); + set_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags); } } @@ -649,15 +656,15 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, if ((drive->dev_flags & IDE_DFLAG_DSC_OVERLAP) == 0 && (rq->cmd[13] & REQ_IDETAPE_PC2) == 0) - drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC; + set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); if (drive->dev_flags & IDE_DFLAG_POST_RESET) { - drive->atapi_flags |= IDE_AFLAG_IGNORE_DSC; + set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); drive->dev_flags &= ~IDE_DFLAG_POST_RESET; } - if (!(drive->atapi_flags & IDE_AFLAG_IGNORE_DSC) && - !(stat & ATA_DSC)) { + if (!test_and_clear_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags) && + (stat & ATA_DSC) == 0) { if (postponed_rq == NULL) { tape->dsc_polling_start = jiffies; tape->dsc_poll_freq = tape->best_dsc_rw_freq; @@ -677,9 +684,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW; idetape_postpone_request(drive); return ide_stopped; - } else - drive->atapi_flags &= ~IDE_AFLAG_IGNORE_DSC; - + } if (rq->cmd[13] & REQ_IDETAPE_READ) { pc = &tape->queued_pc; ide_tape_create_rw_cmd(tape, pc, rq, READ_6); @@ -739,7 +744,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout) int load_attempted = 0; /* Wait for the tape to become ready */ - set_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT), &drive->atapi_flags); + set_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags); timeout += jiffies; while (time_before(jiffies, timeout)) { if (ide_do_test_unit_ready(drive, disk) == 0) @@ -815,7 +820,7 @@ static void __ide_tape_discard_merge_buffer(ide_drive_t *drive) if (tape->chrdev_dir != IDETAPE_DIR_READ) return; - clear_bit(ilog2(IDE_AFLAG_FILEMARK), &drive->atapi_flags); + clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags); tape->valid = 0; if (tape->buf != NULL) { kfree(tape->buf); @@ -1108,8 +1113,7 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op, if (tape->chrdev_dir == IDETAPE_DIR_READ) { tape->valid = 0; - if (test_and_clear_bit(ilog2(IDE_AFLAG_FILEMARK), - &drive->atapi_flags)) + if (test_and_clear_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) ++count; ide_tape_discard_merge_buffer(drive, 0); } @@ -1164,7 +1168,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf, debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count); if (tape->chrdev_dir != IDETAPE_DIR_READ) { - if (test_bit(ilog2(IDE_AFLAG_DETECT_BS), &drive->atapi_flags)) + if 
(test_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags)) if (count > tape->blk_size && (count % tape->blk_size) == 0) tape->user_bs_factor = count / tape->blk_size; @@ -1180,8 +1184,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf, /* refill if staging buffer is empty */ if (!tape->valid) { /* If we are at a filemark, nothing more to read */ - if (test_bit(ilog2(IDE_AFLAG_FILEMARK), - &drive->atapi_flags)) + if (test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) break; /* read */ if (idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, @@ -1199,7 +1202,7 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf, done += todo; } - if (!done && test_bit(ilog2(IDE_AFLAG_FILEMARK), &drive->atapi_flags)) { + if (!done && test_bit(IDE_AFLAG_FILEMARK, &drive->atapi_flags)) { debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name); idetape_space_over_filemarks(drive, MTFSF, 1); @@ -1333,8 +1336,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count) ide_tape_discard_merge_buffer(drive, 0); retval = ide_do_start_stop(drive, disk, !IDETAPE_LU_LOAD_MASK); if (!retval) - clear_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT), - &drive->atapi_flags); + clear_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags); return retval; case MTNOP: ide_tape_discard_merge_buffer(drive, 0); @@ -1356,11 +1358,9 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count) mt_count % tape->blk_size) return -EIO; tape->user_bs_factor = mt_count / tape->blk_size; - clear_bit(ilog2(IDE_AFLAG_DETECT_BS), - &drive->atapi_flags); + clear_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags); } else - set_bit(ilog2(IDE_AFLAG_DETECT_BS), - &drive->atapi_flags); + set_bit(IDE_AFLAG_DETECT_BS, &drive->atapi_flags); return 0; case MTSEEK: ide_tape_discard_merge_buffer(drive, 0); @@ -1486,7 +1486,7 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp) return -ENXIO; lock_kernel(); - tape = ide_tape_get(NULL, true, i); + tape = ide_tape_chrdev_get(i); if (!tape) { unlock_kernel(); return -ENXIO; @@ -1505,20 +1505,20 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp) filp->private_data = tape; - if (test_and_set_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags)) { + if (test_and_set_bit(IDE_AFLAG_BUSY, &drive->atapi_flags)) { retval = -EBUSY; goto out_put_tape; } retval = idetape_wait_ready(drive, 60 * HZ); if (retval) { - clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags); + clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags); printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name); goto out_put_tape; } idetape_read_position(drive); - if (!test_bit(ilog2(IDE_AFLAG_ADDRESS_VALID), &drive->atapi_flags)) + if (!test_bit(IDE_AFLAG_ADDRESS_VALID, &drive->atapi_flags)) (void)idetape_rewind_tape(drive); /* Read block size and write protect status from drive. 
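
The ide-tape churn above is the flag-representation change in miniature:
IDE_AFLAG_* values go back to being bit numbers passed to the atomic bitops,
rather than masks combined with |= and & plus ilog2() conversions. Side by
side (sketch; AFLAG_FOO is a hypothetical flag):

#define AFLAG_FOO_BIT	3			/* bit number */
#define AFLAG_FOO_MASK	(1UL << AFLAG_FOO_BIT)	/* mask form */

static unsigned long flags;

static void mask_style(void)		/* plain read-modify-write */
{
	flags |= AFLAG_FOO_MASK;	/* not atomic on SMP */
	if (flags & AFLAG_FOO_MASK)
		flags &= ~AFLAG_FOO_MASK;
}

static void bitnr_style(void)		/* what this patch restores */
{
	set_bit(AFLAG_FOO_BIT, &flags);	/* atomic read-modify-write */
	if (test_and_clear_bit(AFLAG_FOO_BIT, &flags)) {
		/* bit was set and is now clear, in one atomic step */
	}
}
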
*/ @@ -1534,7 +1534,7 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp) if (tape->write_prot) { if ((filp->f_flags & O_ACCMODE) == O_WRONLY || (filp->f_flags & O_ACCMODE) == O_RDWR) { - clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags); + clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags); retval = -EROFS; goto out_put_tape; } @@ -1591,17 +1591,15 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp) ide_tape_discard_merge_buffer(drive, 1); } - if (minor < 128 && test_bit(ilog2(IDE_AFLAG_MEDIUM_PRESENT), - &drive->atapi_flags)) + if (minor < 128 && test_bit(IDE_AFLAG_MEDIUM_PRESENT, &drive->atapi_flags)) (void) idetape_rewind_tape(drive); - if (tape->chrdev_dir == IDETAPE_DIR_NONE) { if (tape->door_locked == DOOR_LOCKED) { if (!ide_set_media_lock(drive, tape->disk, 0)) tape->door_locked = DOOR_UNLOCKED; } } - clear_bit(ilog2(IDE_AFLAG_BUSY), &drive->atapi_flags); + clear_bit(IDE_AFLAG_BUSY, &drive->atapi_flags); ide_tape_put(tape); unlock_kernel(); return 0; @@ -1907,7 +1905,7 @@ static const struct file_operations idetape_fops = { static int idetape_open(struct block_device *bdev, fmode_t mode) { - struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk, false, 0); + struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk); if (!tape) return -ENXIO; diff --git a/trunk/drivers/ide/ide-taskfile.c b/trunk/drivers/ide/ide-taskfile.c index 75b85a8cd2d4..a0c3e1b2f73c 100644 --- a/trunk/drivers/ide/ide-taskfile.c +++ b/trunk/drivers/ide/ide-taskfile.c @@ -98,6 +98,7 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd) if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) { ide_tf_dump(drive->name, cmd); tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); + SELECT_MASK(drive, 0); if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) { u8 data[2] = { cmd->tf.data, cmd->hob.data }; @@ -165,7 +166,7 @@ static ide_startstop_t task_no_data_intr(ide_drive_t *drive) if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) { if (custom && tf->command == ATA_CMD_SET_MULTI) { drive->mult_req = drive->mult_count = 0; - drive->special_flags |= IDE_SFLAG_RECALIBRATE; + drive->special.b.recalibrate = 1; (void)ide_dump_status(drive, __func__, stat); return ide_stopped; } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) { diff --git a/trunk/drivers/ide/ide.c b/trunk/drivers/ide/ide.c index 16d056939f9f..92c9b90931e7 100644 --- a/trunk/drivers/ide/ide.c +++ b/trunk/drivers/ide/ide.c @@ -211,11 +211,6 @@ static unsigned int ide_noflush; module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0); MODULE_PARM_DESC(noflush, "disable flush requests for a device"); -static unsigned int ide_nohpa; - -module_param_call(nohpa, ide_set_dev_param_mask, NULL, &ide_nohpa, 0); -MODULE_PARM_DESC(nohpa, "disable Host Protected Area for a device"); - static unsigned int ide_noprobe; module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0); @@ -286,11 +281,6 @@ static void ide_dev_apply_params(ide_drive_t *drive, u8 unit) drive->name); drive->dev_flags |= IDE_DFLAG_NOFLUSH; } - if (ide_nohpa & (1 << i)) { - printk(KERN_INFO "ide: disabling Host Protected Area for %s\n", - drive->name); - drive->dev_flags |= IDE_DFLAG_NOHPA; - } if (ide_noprobe & (1 << i)) { printk(KERN_INFO "ide: skipping probe for %s\n", drive->name); drive->dev_flags |= IDE_DFLAG_NOPROBE; diff --git a/trunk/drivers/ide/ide_platform.c b/trunk/drivers/ide/ide_platform.c index ee9b55ecc62b..051b4ab0f359 100644 --- a/trunk/drivers/ide/ide_platform.c +++ b/trunk/drivers/ide/ide_platform.c 
@@ -21,7 +21,7 @@ #include #include -static void __devinit plat_ide_setup_ports(struct ide_hw *hw, +static void __devinit plat_ide_setup_ports(hw_regs_t *hw, void __iomem *base, void __iomem *ctrl, struct pata_platform_info *pdata, @@ -40,11 +40,12 @@ static void __devinit plat_ide_setup_ports(struct ide_hw *hw, hw->io_ports.ctl_addr = (unsigned long)ctrl; hw->irq = irq; + + hw->chipset = ide_generic; } static const struct ide_port_info platform_ide_port_info = { .host_flags = IDE_HFLAG_NO_DMA, - .chipset = ide_generic, }; static int __devinit plat_ide_probe(struct platform_device *pdev) @@ -54,7 +55,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev) struct pata_platform_info *pdata; struct ide_host *host; int ret = 0, mmio = 0; - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; struct ide_port_info d = platform_ide_port_info; pdata = pdev->dev.platform_data; @@ -98,7 +99,7 @@ static int __devinit plat_ide_probe(struct platform_device *pdev) if (mmio) d.host_flags |= IDE_HFLAG_MMIO; - ret = ide_host_add(&d, hws, 1, &host); + ret = ide_host_add(&d, hws, &host); if (ret) goto out; diff --git a/trunk/drivers/ide/macide.c b/trunk/drivers/ide/macide.c index 1447c8c90565..4b1718e83283 100644 --- a/trunk/drivers/ide/macide.c +++ b/trunk/drivers/ide/macide.c @@ -62,7 +62,7 @@ int macide_ack_intr(ide_hwif_t* hwif) return 0; } -static void __init macide_setup_ports(struct ide_hw *hw, unsigned long base, +static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base, int irq, ide_ack_intr_t *ack_intr) { int i; @@ -76,12 +76,13 @@ static void __init macide_setup_ports(struct ide_hw *hw, unsigned long base, hw->irq = irq; hw->ack_intr = ack_intr; + + hw->chipset = ide_generic; } static const struct ide_port_info macide_port_info = { .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, - .chipset = ide_generic, }; static const char *mac_ide_name[] = @@ -96,7 +97,7 @@ static int __init macide_init(void) ide_ack_intr_t *ack_intr; unsigned long base; int irq; - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; if (!MACH_IS_MAC) return -ENODEV; @@ -126,7 +127,7 @@ static int __init macide_init(void) macide_setup_ports(&hw, base, irq, ack_intr); - return ide_host_add(&macide_port_info, hws, 1, NULL); + return ide_host_add(&macide_port_info, hws, NULL); } module_init(macide_init); diff --git a/trunk/drivers/ide/palm_bk3710.c b/trunk/drivers/ide/palm_bk3710.c index 3c1dc0152153..09d813d313f4 100644 --- a/trunk/drivers/ide/palm_bk3710.c +++ b/trunk/drivers/ide/palm_bk3710.c @@ -306,7 +306,6 @@ static struct ide_port_info __devinitdata palm_bk3710_port_info = { .host_flags = IDE_HFLAG_MMIO, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, - .chipset = ide_palm3710, }; static int __init palm_bk3710_probe(struct platform_device *pdev) @@ -316,7 +315,7 @@ static int __init palm_bk3710_probe(struct platform_device *pdev) void __iomem *base; unsigned long rate, mem_size; int i, rc; - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; clk = clk_get(&pdev->dev, "IDECLK"); if (IS_ERR(clk)) @@ -364,12 +363,13 @@ static int __init palm_bk3710_probe(struct platform_device *pdev) (base + IDE_PALM_ATA_PRI_CTL_OFFSET); hw.irq = irq->start; hw.dev = &pdev->dev; + hw.chipset = ide_palm3710; palm_bk3710_port_info.udma_mask = rate < 100000000 ? 
ATA_UDMA4 : ATA_UDMA5; /* Register the IDE interface with Linux */ - rc = ide_host_add(&palm_bk3710_port_info, hws, 1, NULL); + rc = ide_host_add(&palm_bk3710_port_info, hws, NULL); if (rc) goto out; diff --git a/trunk/drivers/ide/pdc202xx_new.c b/trunk/drivers/ide/pdc202xx_new.c index 65ba8239e7b5..b68906c3c17e 100644 --- a/trunk/drivers/ide/pdc202xx_new.c +++ b/trunk/drivers/ide/pdc202xx_new.c @@ -40,6 +40,18 @@ #define DBG(fmt, args...) #endif +static const char *pdc_quirk_drives[] = { + "QUANTUM FIREBALLlct08 08", + "QUANTUM FIREBALLP KA6.4", + "QUANTUM FIREBALLP KA9.1", + "QUANTUM FIREBALLP LM20.4", + "QUANTUM FIREBALLP KX13.6", + "QUANTUM FIREBALLP KX20.5", + "QUANTUM FIREBALLP KX27.3", + "QUANTUM FIREBALLP LM20.5", + NULL +}; + static u8 max_dma_rate(struct pci_dev *pdev) { u8 mode; @@ -188,6 +200,19 @@ static u8 pdcnew_cable_detect(ide_hwif_t *hwif) return ATA_CBL_PATA80; } +static void pdcnew_quirkproc(ide_drive_t *drive) +{ + const char **list, *m = (char *)&drive->id[ATA_ID_PROD]; + + for (list = pdc_quirk_drives; *list != NULL; list++) + if (strstr(m, *list) != NULL) { + drive->quirk_list = 2; + return; + } + + drive->quirk_list = 0; +} + static void pdcnew_reset(ide_drive_t *drive) { /* @@ -448,6 +473,7 @@ static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev) static const struct ide_port_ops pdcnew_port_ops = { .set_pio_mode = pdcnew_set_pio_mode, .set_dma_mode = pdcnew_set_dma_mode, + .quirkproc = pdcnew_quirkproc, .resetproc = pdcnew_reset, .cable_detect = pdcnew_cable_detect, }; diff --git a/trunk/drivers/ide/pdc202xx_old.c b/trunk/drivers/ide/pdc202xx_old.c index b6abf7e52cac..e24ecc87a9b1 100644 --- a/trunk/drivers/ide/pdc202xx_old.c +++ b/trunk/drivers/ide/pdc202xx_old.c @@ -23,6 +23,18 @@ #define PDC202XX_DEBUG_DRIVE_INFO 0 +static const char *pdc_quirk_drives[] = { + "QUANTUM FIREBALLlct08 08", + "QUANTUM FIREBALLP KA6.4", + "QUANTUM FIREBALLP KA9.1", + "QUANTUM FIREBALLP LM20.4", + "QUANTUM FIREBALLP KX13.6", + "QUANTUM FIREBALLP KX20.5", + "QUANTUM FIREBALLP KX27.3", + "QUANTUM FIREBALLP LM20.5", + NULL +}; + static void pdc_old_disable_66MHz_clock(ide_hwif_t *); static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed) @@ -139,6 +151,19 @@ static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif) outb(clock & ~(hwif->channel ? 
0x08 : 0x02), clock_reg); } +static void pdc202xx_quirkproc(ide_drive_t *drive) +{ + const char **list, *m = (char *)&drive->id[ATA_ID_PROD]; + + for (list = pdc_quirk_drives; *list != NULL; list++) + if (strstr(m, *list) != NULL) { + drive->quirk_list = 2; + return; + } + + drive->quirk_list = 0; +} + static void pdc202xx_dma_start(ide_drive_t *drive) { if (drive->current_speed > XFER_UDMA_2) @@ -178,6 +203,52 @@ static int pdc202xx_dma_end(ide_drive_t *drive) return ide_dma_end(drive); } +static int pdc202xx_dma_test_irq(ide_drive_t *drive) +{ + ide_hwif_t *hwif = drive->hwif; + unsigned long high_16 = hwif->extra_base - 16; + u8 dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); + u8 sc1d = inb(high_16 + 0x001d); + + if (hwif->channel) { + /* bit7: Error, bit6: Interrupting, bit5: FIFO Full, bit4: FIFO Empty */ + if ((sc1d & 0x50) == 0x50) + goto somebody_else; + else if ((sc1d & 0x40) == 0x40) + return (dma_stat & 4) == 4; + } else { + /* bit3: Error, bit2: Interrupting, bit1: FIFO Full, bit0: FIFO Empty */ + if ((sc1d & 0x05) == 0x05) + goto somebody_else; + else if ((sc1d & 0x04) == 0x04) + return (dma_stat & 4) == 4; + } +somebody_else: + return (dma_stat & 4) == 4; /* return 1 if INTR asserted */ +} + +static void pdc202xx_reset(ide_drive_t *drive) +{ + ide_hwif_t *hwif = drive->hwif; + unsigned long high_16 = hwif->extra_base - 16; + u8 udma_speed_flag = inb(high_16 | 0x001f); + + printk(KERN_WARNING "PDC202xx: software reset...\n"); + + outb(udma_speed_flag | 0x10, high_16 | 0x001f); + mdelay(100); + outb(udma_speed_flag & ~0x10, high_16 | 0x001f); + mdelay(2000); /* 2 seconds ?! */ + + ide_set_max_pio(drive); +} + +static void pdc202xx_dma_lost_irq(ide_drive_t *drive) +{ + pdc202xx_reset(drive); + ide_dma_lost_irq(drive); +} + static int init_chipset_pdc202xx(struct pci_dev *dev) { unsigned long dmabase = pci_resource_start(dev, 4); @@ -231,22 +302,37 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev, static const struct ide_port_ops pdc20246_port_ops = { .set_pio_mode = pdc202xx_set_pio_mode, .set_dma_mode = pdc202xx_set_mode, + .quirkproc = pdc202xx_quirkproc, }; static const struct ide_port_ops pdc2026x_port_ops = { .set_pio_mode = pdc202xx_set_pio_mode, .set_dma_mode = pdc202xx_set_mode, + .quirkproc = pdc202xx_quirkproc, + .resetproc = pdc202xx_reset, .cable_detect = pdc2026x_cable_detect, }; +static const struct ide_dma_ops pdc20246_dma_ops = { + .dma_host_set = ide_dma_host_set, + .dma_setup = ide_dma_setup, + .dma_start = ide_dma_start, + .dma_end = ide_dma_end, + .dma_test_irq = pdc202xx_dma_test_irq, + .dma_lost_irq = ide_dma_lost_irq, + .dma_timer_expiry = ide_dma_sff_timer_expiry, + .dma_sff_read_status = ide_dma_sff_read_status, +}; + static const struct ide_dma_ops pdc2026x_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = pdc202xx_dma_start, .dma_end = pdc202xx_dma_end, - .dma_test_irq = ide_dma_test_irq, - .dma_lost_irq = ide_dma_lost_irq, + .dma_test_irq = pdc202xx_dma_test_irq, + .dma_lost_irq = pdc202xx_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, + .dma_clear = pdc202xx_reset, .dma_sff_read_status = ide_dma_sff_read_status, }; @@ -268,7 +354,7 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = { .name = DRV_NAME, .init_chipset = init_chipset_pdc202xx, .port_ops = &pdc20246_port_ops, - .dma_ops = &sff_dma_ops, + .dma_ops = &pdc20246_dma_ops, .host_flags = IDE_HFLAGS_PDC202XX, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, diff --git a/trunk/drivers/ide/pmac.c 
b/trunk/drivers/ide/pmac.c index 97642a7a79c4..f76e4e6b408f 100644 --- a/trunk/drivers/ide/pmac.c +++ b/trunk/drivers/ide/pmac.c @@ -1023,14 +1023,13 @@ static const struct ide_port_info pmac_port_info = { * Setup, register & probe an IDE channel driven by this driver, this is * called by one of the 2 probe functions (macio or PCI). */ -static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, - struct ide_hw *hw) +static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, hw_regs_t *hw) { struct device_node *np = pmif->node; const int *bidp; struct ide_host *host; ide_hwif_t *hwif; - struct ide_hw *hws[] = { hw }; + hw_regs_t *hws[] = { hw, NULL, NULL, NULL }; struct ide_port_info d = pmac_port_info; int rc; @@ -1078,7 +1077,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, /* Make sure we have sane timings */ sanitize_timings(pmif); - host = ide_host_alloc(&d, hws, 1); + host = ide_host_alloc(&d, hws); if (host == NULL) return -ENOMEM; hwif = host->ports[0]; @@ -1125,7 +1124,7 @@ static int __devinit pmac_ide_setup_device(pmac_ide_hwif_t *pmif, return 0; } -static void __devinit pmac_ide_init_ports(struct ide_hw *hw, unsigned long base) +static void __devinit pmac_ide_init_ports(hw_regs_t *hw, unsigned long base) { int i; @@ -1145,7 +1144,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match) unsigned long regbase; pmac_ide_hwif_t *pmif; int irq, rc; - struct ide_hw hw; + hw_regs_t hw; pmif = kzalloc(sizeof(*pmif), GFP_KERNEL); if (pmif == NULL) @@ -1269,7 +1268,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id) void __iomem *base; unsigned long rbase, rlen; int rc; - struct ide_hw hw; + hw_regs_t hw; np = pci_device_to_OF_node(pdev); if (np == NULL) { diff --git a/trunk/drivers/ide/q40ide.c b/trunk/drivers/ide/q40ide.c index ab49a97023d9..c79346679244 100644 --- a/trunk/drivers/ide/q40ide.c +++ b/trunk/drivers/ide/q40ide.c @@ -51,11 +51,11 @@ static int q40ide_default_irq(unsigned long base) /* * Addresses are pretranslated for Q40 ISA access. 
*/ -static void q40_ide_setup_ports(struct ide_hw *hw, unsigned long base, +static void q40_ide_setup_ports(hw_regs_t *hw, unsigned long base, ide_ack_intr_t *ack_intr, int irq) { - memset(hw, 0, sizeof(*hw)); + memset(hw, 0, sizeof(hw_regs_t)); /* BIG FAT WARNING: assumption: only DATA port is ever used in 16 bit mode */ hw->io_ports.data_addr = Q40_ISA_IO_W(base); @@ -70,6 +70,8 @@ static void q40_ide_setup_ports(struct ide_hw *hw, unsigned long base, hw->irq = irq; hw->ack_intr = ack_intr; + + hw->chipset = ide_generic; } static void q40ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, @@ -117,7 +119,6 @@ static const struct ide_port_info q40ide_port_info = { .tp_ops = &q40ide_tp_ops, .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, .irq_flags = IRQF_SHARED, - .chipset = ide_generic, }; /* @@ -135,7 +136,7 @@ static const char *q40_ide_names[Q40IDE_NUM_HWIFS]={ static int __init q40ide_init(void) { int i; - struct ide_hw hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL }; + hw_regs_t hw[Q40IDE_NUM_HWIFS], *hws[] = { NULL, NULL, NULL, NULL }; if (!MACH_IS_Q40) return -ENODEV; @@ -162,7 +163,7 @@ static int __init q40ide_init(void) hws[i] = &hw[i]; } - return ide_host_add(&q40ide_port_info, hws, Q40IDE_NUM_HWIFS, NULL); + return ide_host_add(&q40ide_port_info, hws, NULL); } module_init(q40ide_init); diff --git a/trunk/drivers/ide/rapide.c b/trunk/drivers/ide/rapide.c index 00f54248f41f..d5003ca69801 100644 --- a/trunk/drivers/ide/rapide.c +++ b/trunk/drivers/ide/rapide.c @@ -13,10 +13,9 @@ static const struct ide_port_info rapide_port_info = { .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA, - .chipset = ide_generic, }; -static void rapide_setup_ports(struct ide_hw *hw, void __iomem *base, +static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base, void __iomem *ctrl, unsigned int sz, int irq) { unsigned long port = (unsigned long)base; @@ -36,7 +35,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id) void __iomem *base; struct ide_host *host; int ret; - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; ret = ecard_request_resources(ec); if (ret) @@ -50,9 +49,10 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id) memset(&hw, 0, sizeof(hw)); rapide_setup_ports(&hw, base, base + 0x818, 1 << 6, ec->irq); + hw.chipset = ide_generic; hw.dev = &ec->dev; - ret = ide_host_add(&rapide_port_info, hws, 1, &host); + ret = ide_host_add(&rapide_port_info, hws, &host); if (ret) goto release; diff --git a/trunk/drivers/ide/scc_pata.c b/trunk/drivers/ide/scc_pata.c index 1104bb301eb9..5be41f25204f 100644 --- a/trunk/drivers/ide/scc_pata.c +++ b/trunk/drivers/ide/scc_pata.c @@ -559,7 +559,7 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev, { struct scc_ports *ports = pci_get_drvdata(dev); struct ide_host *host; - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; int i, rc; memset(&hw, 0, sizeof(hw)); @@ -567,8 +567,9 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev, hw.io_ports_array[i] = ports->dma + 0x20 + i * 4; hw.irq = dev->irq; hw.dev = &dev->dev; + hw.chipset = ide_pci; - rc = ide_host_add(d, hws, 1, &host); + rc = ide_host_add(d, hws, &host); if (rc) return rc; @@ -822,7 +823,6 @@ static const struct ide_port_info scc_chipset __devinitdata = { .host_flags = IDE_HFLAG_SINGLE, .irq_flags = IRQF_SHARED, .pio_mask = ATA_PIO4, - .chipset = ide_pci, }; /** diff --git a/trunk/drivers/ide/setup-pci.c b/trunk/drivers/ide/setup-pci.c index ab3db61d2ba0..7a3a12d6e638 
100644 --- a/trunk/drivers/ide/setup-pci.c +++ b/trunk/drivers/ide/setup-pci.c @@ -1,7 +1,7 @@ /* * Copyright (C) 1998-2000 Andre Hedrick * Copyright (C) 1995-1998 Mark Lord - * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz + * Copyright (C) 2007 Bartlomiej Zolnierkiewicz * * May be copied or modified under the terms of the GNU General Public License */ @@ -301,11 +301,11 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info * } /** - * ide_hw_configure - configure a struct ide_hw instance + * ide_hw_configure - configure a hw_regs_t instance * @dev: PCI device holding interface * @d: IDE port info * @port: port number - * @hw: struct ide_hw instance corresponding to this port + * @hw: hw_regs_t instance corresponding to this port * * Perform the initial set up for the hardware interface structure. This * is done per interface port rather than per PCI device. There may be @@ -315,7 +315,7 @@ static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info * */ static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d, - unsigned int port, struct ide_hw *hw) + unsigned int port, hw_regs_t *hw) { unsigned long ctl = 0, base = 0; @@ -344,6 +344,7 @@ static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d, memset(hw, 0, sizeof(*hw)); hw->dev = &dev->dev; + hw->chipset = d->chipset ? d->chipset : ide_pci; ide_std_init_ports(hw, base, ctl | 2); return 0; @@ -445,8 +446,8 @@ static int ide_setup_pci_controller(struct pci_dev *dev, * ide_pci_setup_ports - configure ports/devices on PCI IDE * @dev: PCI device * @d: IDE port info - * @hw: struct ide_hw instances corresponding to this PCI IDE device - * @hws: struct ide_hw pointers table to update + * @hw: hw_regs_t instances corresponding to this PCI IDE device + * @hws: hw_regs_t pointers table to update * * Scan the interfaces attached to this device and do any * necessary per port setup. Attach the devices and ask the @@ -458,7 +459,7 @@ static int ide_setup_pci_controller(struct pci_dev *dev, */ void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, - struct ide_hw *hw, struct ide_hw **hws) + hw_regs_t *hw, hw_regs_t **hws) { int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port; u8 tmp; @@ -534,15 +535,61 @@ static int do_ide_setup_pci_device(struct pci_dev *dev, return ret; } +int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d, + void *priv) +{ + struct ide_host *host; + hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL }; + int ret; + + ret = ide_setup_pci_controller(dev, d, 1); + if (ret < 0) + goto out; + + ide_pci_setup_ports(dev, d, &hw[0], &hws[0]); + + host = ide_host_alloc(d, hws); + if (host == NULL) { + ret = -ENOMEM; + goto out; + } + + host->dev[0] = &dev->dev; + + host->host_priv = priv; + + host->irq_flags = IRQF_SHARED; + + pci_set_drvdata(dev, host); + + ret = do_ide_setup_pci_device(dev, d, 1); + if (ret < 0) + goto out; + + /* fixup IRQ */ + if (ide_pci_is_in_compatibility_mode(dev)) { + hw[0].irq = pci_get_legacy_ide_irq(dev, 0); + hw[1].irq = pci_get_legacy_ide_irq(dev, 1); + } else + hw[1].irq = hw[0].irq = ret; + + ret = ide_host_register(host, d, hws); + if (ret) + ide_host_free(host); +out: + return ret; +} +EXPORT_SYMBOL_GPL(ide_pci_init_one); + int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, const struct ide_port_info *d, void *priv) { struct pci_dev *pdev[] = { dev1, dev2 }; struct ide_host *host; - int ret, i, n_ports = dev2 ? 
4 : 2; - struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL }; + int ret, i; + hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL }; - for (i = 0; i < n_ports / 2; i++) { + for (i = 0; i < 2; i++) { ret = ide_setup_pci_controller(pdev[i], d, !i); if (ret < 0) goto out; @@ -550,24 +597,23 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]); } - host = ide_host_alloc(d, hws, n_ports); + host = ide_host_alloc(d, hws); if (host == NULL) { ret = -ENOMEM; goto out; } host->dev[0] = &dev1->dev; - if (dev2) - host->dev[1] = &dev2->dev; + host->dev[1] = &dev2->dev; host->host_priv = priv; + host->irq_flags = IRQF_SHARED; pci_set_drvdata(pdev[0], host); - if (dev2) - pci_set_drvdata(pdev[1], host); + pci_set_drvdata(pdev[1], host); - for (i = 0; i < n_ports / 2; i++) { + for (i = 0; i < 2; i++) { ret = do_ide_setup_pci_device(pdev[i], d, !i); /* @@ -593,13 +639,6 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, } EXPORT_SYMBOL_GPL(ide_pci_init_two); -int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d, - void *priv) -{ - return ide_pci_init_two(dev, NULL, d, priv); -} -EXPORT_SYMBOL_GPL(ide_pci_init_one); - void ide_pci_remove(struct pci_dev *dev) { struct ide_host *host = pci_get_drvdata(dev); diff --git a/trunk/drivers/ide/sgiioc4.c b/trunk/drivers/ide/sgiioc4.c index 5f37f168f944..e5d2a48a84de 100644 --- a/trunk/drivers/ide/sgiioc4.c +++ b/trunk/drivers/ide/sgiioc4.c @@ -91,7 +91,7 @@ typedef struct { static void -sgiioc4_init_hwif_ports(struct ide_hw *hw, unsigned long data_port, +sgiioc4_init_hwif_ports(hw_regs_t * hw, unsigned long data_port, unsigned long ctrl_port, unsigned long irq_port) { unsigned long reg = data_port; @@ -546,7 +546,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) unsigned long cmd_base, irqport; unsigned long bar0, cmd_phys_base, ctl; void __iomem *virt_base; - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL }; int rc; /* Get the CmdBlk and CtrlBlk Base Registers */ @@ -575,12 +575,13 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev) memset(&hw, 0, sizeof(hw)); sgiioc4_init_hwif_ports(&hw, cmd_base, ctl, irqport); hw.irq = dev->irq; + hw.chipset = ide_pci; hw.dev = &dev->dev; /* Initializing chipset IRQ Registers */ writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4)); - rc = ide_host_add(&sgiioc4_port_info, hws, 1, NULL); + rc = ide_host_add(&sgiioc4_port_info, hws, NULL); if (!rc) return 0; diff --git a/trunk/drivers/ide/siimage.c b/trunk/drivers/ide/siimage.c index bd82d228608c..e4973cd1fba9 100644 --- a/trunk/drivers/ide/siimage.c +++ b/trunk/drivers/ide/siimage.c @@ -451,8 +451,8 @@ static int sil_sata_reset_poll(ide_drive_t *drive) static void sil_sata_pre_reset(ide_drive_t *drive) { if (drive->media == ide_disk) { - drive->special_flags &= - ~(IDE_SFLAG_SET_GEOMETRY | IDE_SFLAG_RECALIBRATE); + drive->special.b.set_geometry = 0; + drive->special.b.recalibrate = 0; } } diff --git a/trunk/drivers/ide/sl82c105.c b/trunk/drivers/ide/sl82c105.c index 0924abff52ff..b0a460625335 100644 --- a/trunk/drivers/ide/sl82c105.c +++ b/trunk/drivers/ide/sl82c105.c @@ -10,7 +10,7 @@ * with the timing registers setup. * -- Benjamin Herrenschmidt (01/11/03) benh@kernel.crashing.org * - * Copyright (C) 2006-2007,2009 MontaVista Software, Inc. + * Copyright (C) 2006-2007 MontaVista Software, Inc. 
* Copyright (C) 2007 Bartlomiej Zolnierkiewicz */ @@ -146,15 +146,14 @@ static void sl82c105_dma_lost_irq(ide_drive_t *drive) u32 val, mask = hwif->channel ? CTRL_IDE_IRQB : CTRL_IDE_IRQA; u8 dma_cmd; - printk(KERN_WARNING "sl82c105: lost IRQ, resetting host\n"); + printk("sl82c105: lost IRQ, resetting host\n"); /* * Check the raw interrupt from the drive. */ pci_read_config_dword(dev, 0x40, &val); if (val & mask) - printk(KERN_INFO "sl82c105: drive was requesting IRQ, " - "but host lost it\n"); + printk("sl82c105: drive was requesting IRQ, but host lost it\n"); /* * Was DMA enabled? If so, disable it - we're resetting the @@ -163,7 +162,7 @@ static void sl82c105_dma_lost_irq(ide_drive_t *drive) dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); if (dma_cmd & 1) { outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD); - printk(KERN_INFO "sl82c105: DMA was enabled\n"); + printk("sl82c105: DMA was enabled\n"); } sl82c105_reset_host(dev); diff --git a/trunk/drivers/ide/tx4938ide.c b/trunk/drivers/ide/tx4938ide.c index ea89fddeed91..e33d764e2945 100644 --- a/trunk/drivers/ide/tx4938ide.c +++ b/trunk/drivers/ide/tx4938ide.c @@ -130,7 +130,8 @@ static const struct ide_port_info tx4938ide_port_info __initdata = { static int __init tx4938ide_probe(struct platform_device *pdev) { - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw; + hw_regs_t *hws[] = { &hw, NULL, NULL, NULL }; struct ide_host *host; struct resource *res; struct tx4938ide_platform_info *pdata = pdev->dev.platform_data; @@ -182,7 +183,7 @@ static int __init tx4938ide_probe(struct platform_device *pdev) tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, 0); else d.port_ops = NULL; - ret = ide_host_add(&d, hws, 1, &host); + ret = ide_host_add(&d, hws, &host); if (!ret) platform_set_drvdata(pdev, host); return ret; diff --git a/trunk/drivers/ide/tx4939ide.c b/trunk/drivers/ide/tx4939ide.c index 64b58ecc3f0e..5ca76224f6d1 100644 --- a/trunk/drivers/ide/tx4939ide.c +++ b/trunk/drivers/ide/tx4939ide.c @@ -537,7 +537,8 @@ static const struct ide_port_info tx4939ide_port_info __initdata = { static int __init tx4939ide_probe(struct platform_device *pdev) { - struct ide_hw hw, *hws[] = { &hw }; + hw_regs_t hw; + hw_regs_t *hws[] = { &hw, NULL, NULL, NULL }; struct ide_host *host; struct resource *res; int irq, ret; @@ -580,7 +581,7 @@ static int __init tx4939ide_probe(struct platform_device *pdev) hw.dev = &pdev->dev; pr_info("TX4939 IDE interface (base %#lx, irq %d)\n", mapbase, irq); - host = ide_host_alloc(&tx4939ide_port_info, hws, 1); + host = ide_host_alloc(&tx4939ide_port_info, hws); if (!host) return -ENOMEM; /* use extra_base for base address of the all registers */ diff --git a/trunk/drivers/infiniband/ulp/iser/iscsi_iser.c b/trunk/drivers/infiniband/ulp/iser/iscsi_iser.c index 0ba6ec876296..75223f50de58 100644 --- a/trunk/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/trunk/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -257,8 +257,11 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task) { struct iscsi_iser_task *iser_task = task->dd_data; - /* mgmt tasks do not need special cleanup */ - if (!task->sc) + /* + * mgmt tasks do not need special cleanup and we do not + * allocate anything in the init task callout + */ + if (!task->sc || task->state == ISCSI_TASK_PENDING) return; if (iser_task->status == ISER_TASK_STATUS_STARTED) { @@ -514,8 +517,7 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s } static struct iscsi_endpoint * -iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr 
*dst_addr, - int non_blocking) +iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking) { int err; struct iser_conn *ib_conn; diff --git a/trunk/drivers/lguest/Kconfig b/trunk/drivers/lguest/Kconfig index 0aaa0597a622..a3d3cbab359a 100644 --- a/trunk/drivers/lguest/Kconfig +++ b/trunk/drivers/lguest/Kconfig @@ -1,6 +1,6 @@ config LGUEST tristate "Linux hypervisor example code" - depends on X86_32 && EXPERIMENTAL && EVENTFD + depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX select HVC_DRIVER ---help--- This is a very simple module which allows you to run diff --git a/trunk/drivers/lguest/core.c b/trunk/drivers/lguest/core.c index a6974e9b8ebf..4845fb3cf74b 100644 --- a/trunk/drivers/lguest/core.c +++ b/trunk/drivers/lguest/core.c @@ -95,7 +95,7 @@ static __init int map_switcher(void) * array of struct pages. It increments that pointer, but we don't * care. */ pagep = switcher_page; - err = map_vm_area(switcher_vma, PAGE_KERNEL_EXEC, &pagep); + err = map_vm_area(switcher_vma, PAGE_KERNEL, &pagep); if (err) { printk("lguest: map_vm_area failed: %i\n", err); goto free_vma; @@ -188,9 +188,6 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) { /* We stop running once the Guest is dead. */ while (!cpu->lg->dead) { - unsigned int irq; - bool more; - /* First we run any hypercalls the Guest wants done. */ if (cpu->hcall) do_hypercalls(cpu); @@ -198,23 +195,23 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) /* It's possible the Guest did a NOTIFY hypercall to the * Launcher, in which case we return from the read() now. */ if (cpu->pending_notify) { - if (!send_notify_to_eventfd(cpu)) { - if (put_user(cpu->pending_notify, user)) - return -EFAULT; - return sizeof(cpu->pending_notify); - } + if (put_user(cpu->pending_notify, user)) + return -EFAULT; + return sizeof(cpu->pending_notify); } /* Check for signals */ if (signal_pending(current)) return -ERESTARTSYS; + /* If Waker set break_out, return to Launcher. */ + if (cpu->break_out) + return -EAGAIN; + /* Check if there are any interrupts which can be delivered now: * if so, this sets up the hander to be executed when we next * run the Guest. */ - irq = interrupt_pending(cpu, &more); - if (irq < LGUEST_IRQS) - try_deliver_interrupt(cpu, irq, more); + maybe_do_interrupt(cpu); /* All long-lived kernel loops need to check with this horrible * thing called the freezer. If the Host is trying to suspend, @@ -227,15 +224,10 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) break; /* If the Guest asked to be stopped, we sleep. The Guest's - * clock timer will wake us. */ + * clock timer or LHREQ_BREAK from the Waker will wake us. */ if (cpu->halted) { set_current_state(TASK_INTERRUPTIBLE); - /* Just before we sleep, make sure no interrupt snuck in - * which we should be doing. */ - if (interrupt_pending(cpu, &more) < LGUEST_IRQS) - set_current_state(TASK_RUNNING); - else - schedule(); + schedule(); continue; } diff --git a/trunk/drivers/lguest/hypercalls.c b/trunk/drivers/lguest/hypercalls.c index c29ffa19cb74..54d66f05fefa 100644 --- a/trunk/drivers/lguest/hypercalls.c +++ b/trunk/drivers/lguest/hypercalls.c @@ -37,10 +37,6 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) /* This call does nothing, except by breaking out of the Guest * it makes us process all the asynchronous hypercalls. */ break; - case LHCALL_SEND_INTERRUPTS: - /* This call does nothing too, but by breaking out of the Guest - * it makes us process any pending interrupts. 
*/ - break; case LHCALL_LGUEST_INIT: /* You can't get here unless you're already initialized. Don't * do that. */ @@ -77,21 +73,11 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) guest_set_stack(cpu, args->arg1, args->arg2, args->arg3); break; case LHCALL_SET_PTE: -#ifdef CONFIG_X86_PAE - guest_set_pte(cpu, args->arg1, args->arg2, - __pte(args->arg3 | (u64)args->arg4 << 32)); -#else guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3)); -#endif - break; - case LHCALL_SET_PGD: - guest_set_pgd(cpu->lg, args->arg1, args->arg2); break; -#ifdef CONFIG_X86_PAE case LHCALL_SET_PMD: guest_set_pmd(cpu->lg, args->arg1, args->arg2); break; -#endif case LHCALL_SET_CLOCKEVENT: guest_set_clockevent(cpu, args->arg1); break; diff --git a/trunk/drivers/lguest/interrupts_and_traps.c b/trunk/drivers/lguest/interrupts_and_traps.c index 0e9067b0d507..6e99adbe1946 100644 --- a/trunk/drivers/lguest/interrupts_and_traps.c +++ b/trunk/drivers/lguest/interrupts_and_traps.c @@ -128,39 +128,30 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, /*H:205 * Virtual Interrupts. * - * interrupt_pending() returns the first pending interrupt which isn't blocked - * by the Guest. It is called before every entry to the Guest, and just before - * we go to sleep when the Guest has halted itself. */ -unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more) + * maybe_do_interrupt() gets called before every entry to the Guest, to see if + * we should divert the Guest to running an interrupt handler. */ +void maybe_do_interrupt(struct lg_cpu *cpu) { unsigned int irq; DECLARE_BITMAP(blk, LGUEST_IRQS); + struct desc_struct *idt; /* If the Guest hasn't even initialized yet, we can do nothing. */ if (!cpu->lg->lguest_data) - return LGUEST_IRQS; + return; /* Take our "irqs_pending" array and remove any interrupts the Guest * wants blocked: the result ends up in "blk". */ if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts, sizeof(blk))) - return LGUEST_IRQS; + return; bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS); /* Find the first interrupt. */ irq = find_first_bit(blk, LGUEST_IRQS); - *more = find_next_bit(blk, LGUEST_IRQS, irq+1); - - return irq; -} - -/* This actually diverts the Guest to running an interrupt handler, once an - * interrupt has been identified by interrupt_pending(). */ -void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) -{ - struct desc_struct *idt; - - BUG_ON(irq >= LGUEST_IRQS); + /* None? Nothing to do */ + if (irq >= LGUEST_IRQS) + return; /* They may be in the middle of an iret, where they asked us never to * deliver interrupts. */ @@ -179,12 +170,8 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) u32 irq_enabled; if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled)) irq_enabled = 0; - if (!irq_enabled) { - /* Make sure they know an IRQ is pending. */ - put_user(X86_EFLAGS_IF, - &cpu->lg->lguest_data->irq_pending); + if (!irq_enabled) return; - } } /* Look at the IDT entry the Guest gave us for this interrupt. The @@ -207,25 +194,6 @@ void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more) * here is a compromise which means at least it gets updated every * timer interrupt. */ write_timestamp(cpu); - - /* If there are no other interrupts we want to deliver, clear - * the pending flag. */ - if (!more) - put_user(0, &cpu->lg->lguest_data->irq_pending); -} - -/* And this is the routine when we want to set an interrupt for the Guest. 
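 * (A note on the bitmap idiom in maybe_do_interrupt() above:
 * bitmap_andnot(blk, cpu->irqs_pending, blk, LGUEST_IRQS) computes
 * "pending AND NOT blocked", and find_first_bit() returns LGUEST_IRQS
 * when no bit is set, which is exactly why "irq >= LGUEST_IRQS" reads
 * as "nothing to deliver".)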
*/ -void set_interrupt(struct lg_cpu *cpu, unsigned int irq) -{ - /* Next time the Guest runs, the core code will see if it can deliver - * this interrupt. */ - set_bit(irq, cpu->irqs_pending); - - /* Make sure it sees it; it might be asleep (eg. halted), or - * running the Guest right now, in which case kick_process() - * will knock it out. */ - if (!wake_up_process(cpu->tsk)) - kick_process(cpu->tsk); } /*:*/ @@ -542,7 +510,10 @@ static enum hrtimer_restart clockdev_fn(struct hrtimer *timer) struct lg_cpu *cpu = container_of(timer, struct lg_cpu, hrt); /* Remember the first interrupt is the timer interrupt. */ - set_interrupt(cpu, 0); + set_bit(0, cpu->irqs_pending); + /* If the Guest is actually stopped, we need to wake it up. */ + if (cpu->halted) + wake_up_process(cpu->tsk); return HRTIMER_NORESTART; } diff --git a/trunk/drivers/lguest/lg.h b/trunk/drivers/lguest/lg.h index d4e8979735cb..af92a176697f 100644 --- a/trunk/drivers/lguest/lg.h +++ b/trunk/drivers/lguest/lg.h @@ -49,7 +49,7 @@ struct lg_cpu { u32 cr2; int ts; u32 esp1; - u16 ss1; + u8 ss1; /* Bitmap of what has changed: see CHANGED_* above. */ int changed; @@ -71,7 +71,9 @@ struct lg_cpu { /* Virtual clock device */ struct hrtimer hrt; - /* Did the Guest tell us to halt? */ + /* Do we need to stop what we're doing and return to userspace? */ + int break_out; + wait_queue_head_t break_wq; int halted; /* Pending virtual interrupts */ @@ -80,16 +82,6 @@ struct lg_cpu { struct lg_cpu_arch arch; }; -struct lg_eventfd { - unsigned long addr; - struct file *event; -}; - -struct lg_eventfd_map { - unsigned int num; - struct lg_eventfd map[]; -}; - /* The private info the thread maintains about the guest. */ struct lguest { @@ -110,8 +102,6 @@ struct lguest unsigned int stack_pages; u32 tsc_khz; - struct lg_eventfd_map *eventfds; - /* Dead? */ const char *dead; }; @@ -147,13 +137,9 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user); * in the kernel. 
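 * (Illustrative aside on the two macros just below, assuming 4 KB
 * pages so PAGE_SHIFT == 12: a pgd value of 0x01234067 splits into a
 * frame number, 0x01234067 >> 12 == 0x1234, and its low flag bits,
 * 0x01234067 & ~PAGE_MASK == 0x067, i.e.
 * _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED|_PAGE_DIRTY.)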
*/ #define pgd_flags(x) (pgd_val(x) & ~PAGE_MASK) #define pgd_pfn(x) (pgd_val(x) >> PAGE_SHIFT) -#define pmd_flags(x) (pmd_val(x) & ~PAGE_MASK) -#define pmd_pfn(x) (pmd_val(x) >> PAGE_SHIFT) /* interrupts_and_traps.c: */ -unsigned int interrupt_pending(struct lg_cpu *cpu, bool *more); -void try_deliver_interrupt(struct lg_cpu *cpu, unsigned int irq, bool more); -void set_interrupt(struct lg_cpu *cpu, unsigned int irq); +void maybe_do_interrupt(struct lg_cpu *cpu); bool deliver_trap(struct lg_cpu *cpu, unsigned int num); void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int i, u32 low, u32 hi); @@ -164,7 +150,6 @@ void setup_default_idt_entries(struct lguest_ro_state *state, void copy_traps(const struct lg_cpu *cpu, struct desc_struct *idt, const unsigned long *def); void guest_set_clockevent(struct lg_cpu *cpu, unsigned long delta); -bool send_notify_to_eventfd(struct lg_cpu *cpu); void init_clockdev(struct lg_cpu *cpu); bool check_syscall_vector(struct lguest *lg); int init_interrupts(void); @@ -183,10 +168,7 @@ void copy_gdt_tls(const struct lg_cpu *cpu, struct desc_struct *gdt); int init_guest_pagetable(struct lguest *lg); void free_guest_pagetable(struct lguest *lg); void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable); -void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 i); -#ifdef CONFIG_X86_PAE void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 i); -#endif void guest_pagetable_clear_all(struct lg_cpu *cpu); void guest_pagetable_flush_user(struct lg_cpu *cpu); void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir, diff --git a/trunk/drivers/lguest/lguest_device.c b/trunk/drivers/lguest/lguest_device.c index e082cdac88b4..df44d962626d 100644 --- a/trunk/drivers/lguest/lguest_device.c +++ b/trunk/drivers/lguest/lguest_device.c @@ -228,8 +228,7 @@ extern void lguest_setup_irq(unsigned int irq); * function. */ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, unsigned index, - void (*callback)(struct virtqueue *vq), - const char *name) + void (*callback)(struct virtqueue *vq)) { struct lguest_device *ldev = to_lgdev(vdev); struct lguest_vq_info *lvq; @@ -264,7 +263,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, /* OK, tell virtio_ring.c to set up a virtqueue now we know its size * and we've got a pointer to its pages. */ vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, - vdev, lvq->pages, lg_notify, callback, name); + vdev, lvq->pages, lg_notify, callback); if (!vq) { err = -ENOMEM; goto unmap; @@ -313,38 +312,6 @@ static void lg_del_vq(struct virtqueue *vq) kfree(lvq); } -static void lg_del_vqs(struct virtio_device *vdev) -{ - struct virtqueue *vq, *n; - - list_for_each_entry_safe(vq, n, &vdev->vqs, list) - lg_del_vq(vq); -} - -static int lg_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char *names[]) -{ - struct lguest_device *ldev = to_lgdev(vdev); - int i; - - /* We must have this many virtqueues. */ - if (nvqs > ldev->desc->num_vq) - return -ENOENT; - - for (i = 0; i < nvqs; ++i) { - vqs[i] = lg_find_vq(vdev, i, callbacks[i], names[i]); - if (IS_ERR(vqs[i])) - goto error; - } - return 0; - -error: - lg_del_vqs(vdev); - return PTR_ERR(vqs[i]); -} - /* The ops structure which hooks everything together. 
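 * (For orientation, a hedged sketch of how a virtio driver consumes
 * this older single-queue interface; my_vdev and my_cb are
 * illustrative names only:
 *
 *	struct virtqueue *vq = my_vdev->config->find_vq(my_vdev, 0, my_cb);
 *	if (IS_ERR(vq))
 *		return PTR_ERR(vq);
 *	...
 *	my_vdev->config->del_vq(vq);
 *
 * The revert trades the array-based find_vqs/del_vqs pair for creating
 * and destroying one queue at a time.)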
*/ static struct virtio_config_ops lguest_config_ops = { .get_features = lg_get_features, @@ -354,8 +321,8 @@ static struct virtio_config_ops lguest_config_ops = { .get_status = lg_get_status, .set_status = lg_set_status, .reset = lg_reset, - .find_vqs = lg_find_vqs, - .del_vqs = lg_del_vqs, + .find_vq = lg_find_vq, + .del_vq = lg_del_vq, }; /* The root device for the lguest virtio devices. This makes them appear as diff --git a/trunk/drivers/lguest/lguest_user.c b/trunk/drivers/lguest/lguest_user.c index 32e297121058..b8ee103eed5f 100644 --- a/trunk/drivers/lguest/lguest_user.c +++ b/trunk/drivers/lguest/lguest_user.c @@ -7,83 +7,32 @@ #include #include #include -#include -#include #include "lg.h" -bool send_notify_to_eventfd(struct lg_cpu *cpu) +/*L:055 When something happens, the Waker process needs a way to stop the + * kernel running the Guest and return to the Launcher. So the Waker writes + * LHREQ_BREAK and the value "1" to /dev/lguest to do this. Once the Launcher + * has done whatever needs attention, it writes LHREQ_BREAK and "0" to release + * the Waker. */ +static int break_guest_out(struct lg_cpu *cpu, const unsigned long __user*input) { - unsigned int i; - struct lg_eventfd_map *map; - - /* lg->eventfds is RCU-protected */ - rcu_read_lock(); - map = rcu_dereference(cpu->lg->eventfds); - for (i = 0; i < map->num; i++) { - if (map->map[i].addr == cpu->pending_notify) { - eventfd_signal(map->map[i].event, 1); - cpu->pending_notify = 0; - break; - } - } - rcu_read_unlock(); - return cpu->pending_notify == 0; -} - -static int add_eventfd(struct lguest *lg, unsigned long addr, int fd) -{ - struct lg_eventfd_map *new, *old = lg->eventfds; - - if (!addr) - return -EINVAL; - - /* Replace the old array with the new one, carefully: others can - * be accessing it at the same time */ - new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1), - GFP_KERNEL); - if (!new) - return -ENOMEM; - - /* First make identical copy. */ - memcpy(new->map, old->map, sizeof(old->map[0]) * old->num); - new->num = old->num; - - /* Now append new entry. */ - new->map[new->num].addr = addr; - new->map[new->num].event = eventfd_fget(fd); - if (IS_ERR(new->map[new->num].event)) { - kfree(new); - return PTR_ERR(new->map[new->num].event); - } - new->num++; - - /* Now put new one in place. */ - rcu_assign_pointer(lg->eventfds, new); - - /* We're not in a big hurry. Wait until noone's looking at old - * version, then delete it. */ - synchronize_rcu(); - kfree(old); - - return 0; -} - -static int attach_eventfd(struct lguest *lg, const unsigned long __user *input) -{ - unsigned long addr, fd; - int err; + unsigned long on; - if (get_user(addr, input) != 0) - return -EFAULT; - input++; - if (get_user(fd, input) != 0) + /* Fetch whether they're turning break on or off. 
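 * (Seen from userspace, both halves of this protocol are two-word
 * writes to /dev/lguest; a hedged sketch, with lguest_fd as an assumed
 * already-open descriptor:
 *
 *	unsigned long args[2] = { LHREQ_BREAK, 1 };
 *	write(lguest_fd, args, sizeof(args));	- Waker: pop the Guest out
 *	args[1] = 0;
 *	write(lguest_fd, args, sizeof(args));	- Launcher: release the Waker
 * )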
*/ + if (get_user(on, input) != 0) return -EFAULT; - mutex_lock(&lguest_lock); - err = add_eventfd(lg, addr, fd); - mutex_unlock(&lguest_lock); - - return 0; + if (on) { + cpu->break_out = 1; + /* Pop it out of the Guest (may be running on different CPU) */ + wake_up_process(cpu->tsk); + /* Wait for them to reset it */ + return wait_event_interruptible(cpu->break_wq, !cpu->break_out); + } else { + cpu->break_out = 0; + wake_up(&cpu->break_wq); + return 0; + } } /*L:050 Sending an interrupt is done by writing LHREQ_IRQ and an interrupt @@ -96,8 +45,9 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input) return -EFAULT; if (irq >= LGUEST_IRQS) return -EINVAL; - - set_interrupt(cpu, irq); + /* Next time the Guest runs, the core code will see if it can deliver + * this interrupt. */ + set_bit(irq, cpu->irqs_pending); return 0; } @@ -176,6 +126,9 @@ static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip) * address. */ lguest_arch_setup_regs(cpu, start_ip); + /* Initialize the queue for the Waker to wait on */ + init_waitqueue_head(&cpu->break_wq); + /* We keep a pointer to the Launcher task (ie. current task) for when * other Guests want to wake this one (eg. console input). */ cpu->tsk = current; @@ -232,13 +185,6 @@ static int initialize(struct file *file, const unsigned long __user *input) goto unlock; } - lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL); - if (!lg->eventfds) { - err = -ENOMEM; - goto free_lg; - } - lg->eventfds->num = 0; - /* Populate the easy fields of our "struct lguest" */ lg->mem_base = (void __user *)args[0]; lg->pfn_limit = args[1]; @@ -246,7 +192,7 @@ static int initialize(struct file *file, const unsigned long __user *input) /* This is the first cpu (cpu 0) and it will start booting at args[2] */ err = lg_cpu_start(&lg->cpus[0], 0, args[2]); if (err) - goto free_eventfds; + goto release_guest; /* Initialize the Guest's shadow page tables, using the toplevel * address the Launcher gave us. This allocates memory, so can fail. */ @@ -265,9 +211,7 @@ static int initialize(struct file *file, const unsigned long __user *input) free_regs: /* FIXME: This should be in free_vcpu */ free_page(lg->cpus[0].regs_page); -free_eventfds: - kfree(lg->eventfds); -free_lg: +release_guest: kfree(lg); unlock: mutex_unlock(&lguest_lock); @@ -308,6 +252,11 @@ static ssize_t write(struct file *file, const char __user *in, /* Once the Guest is dead, you can only read() why it died. */ if (lg->dead) return -ENOENT; + + /* If you're not the task which owns the Guest, all you can do + * is break the Launcher out of running the Guest. */ + if (current != cpu->tsk && req != LHREQ_BREAK) + return -EPERM; } switch (req) { @@ -315,8 +264,8 @@ static ssize_t write(struct file *file, const char __user *in, return initialize(file, input); case LHREQ_IRQ: return user_send_irq(cpu, input); - case LHREQ_EVENTFD: - return attach_eventfd(lg, input); + case LHREQ_BREAK: + return break_guest_out(cpu, input); default: return -EINVAL; } @@ -354,12 +303,6 @@ static int close(struct inode *inode, struct file *file) * the Launcher's memory management structure. */ mmput(lg->cpus[i].mm); } - - /* Release any eventfds they registered. */ - for (i = 0; i < lg->eventfds->num; i++) - fput(lg->eventfds->map[i].event); - kfree(lg->eventfds); - /* If lg->dead doesn't contain an error code it will be NULL or a * kmalloc()ed string, either of which is ok to hand to kfree(). 
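 * (Background on the idiom: ERR_PTR() encodes a small negative errno
 * as an invalid pointer value, IS_ERR() tests for that range, and
 * PTR_ERR() decodes it back, so one pointer return can carry either a
 * real object or an error code.  kfree(NULL) is a no-op, which is why
 * the NULL case needs no special handling; only a genuine IS_ERR()
 * value must be kept away from kfree().)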
*/ if (!IS_ERR(lg->dead)) diff --git a/trunk/drivers/lguest/page_tables.c b/trunk/drivers/lguest/page_tables.c index a6fe1abda240..a059cf9980f7 100644 --- a/trunk/drivers/lguest/page_tables.c +++ b/trunk/drivers/lguest/page_tables.c @@ -53,17 +53,6 @@ * page. */ #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1) -/* For PAE we need the PMD index as well. We use the last 2MB, so we - * will need the last pmd entry of the last pmd page. */ -#ifdef CONFIG_X86_PAE -#define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1) -#define RESERVE_MEM 2U -#define CHECK_GPGD_MASK _PAGE_PRESENT -#else -#define RESERVE_MEM 4U -#define CHECK_GPGD_MASK _PAGE_TABLE -#endif - /* We actually need a separate PTE page for each CPU. Remember that after the * Switcher code itself comes two pages for each CPU, and we don't want this * CPU's guest to see the pages of any other CPU. */ @@ -84,59 +73,24 @@ static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr) { unsigned int index = pgd_index(vaddr); -#ifndef CONFIG_X86_PAE /* We kill any Guest trying to touch the Switcher addresses. */ if (index >= SWITCHER_PGD_INDEX) { kill_guest(cpu, "attempt to access switcher pages"); index = 0; } -#endif /* Return a pointer index'th pgd entry for the i'th page table. */ return &cpu->lg->pgdirs[i].pgdir[index]; } -#ifdef CONFIG_X86_PAE -/* This routine then takes the PGD entry given above, which contains the - * address of the PMD page. It then returns a pointer to the PMD entry for the - * given address. */ -static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) -{ - unsigned int index = pmd_index(vaddr); - pmd_t *page; - - /* We kill any Guest trying to touch the Switcher addresses. */ - if (pgd_index(vaddr) == SWITCHER_PGD_INDEX && - index >= SWITCHER_PMD_INDEX) { - kill_guest(cpu, "attempt to access switcher pages"); - index = 0; - } - - /* You should never call this if the PGD entry wasn't valid */ - BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT)); - page = __va(pgd_pfn(spgd) << PAGE_SHIFT); - - return &page[index]; -} -#endif - /* This routine then takes the page directory entry returned above, which * contains the address of the page table entry (PTE) page. It then returns a * pointer to the PTE entry for the given address. 
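 * (Worked example of the indexing below, assuming 4 KB pages with
 * PTRS_PER_PGD == PTRS_PER_PTE == 1024: for vaddr == 0xc0101234 the
 * pgd index is vaddr >> 22 == 0x300, the pte index is
 * (vaddr >> PAGE_SHIFT) % PTRS_PER_PTE == 0x101, and the remaining
 * 0x234 is the byte offset within the page.)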
*/ -static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr) +static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr) { -#ifdef CONFIG_X86_PAE - pmd_t *pmd = spmd_addr(cpu, spgd, vaddr); - pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT); - - /* You should never call this if the PMD entry wasn't valid */ - BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)); -#else pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT); /* You should never call this if the PGD entry wasn't valid */ BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT)); -#endif - - return &page[pte_index(vaddr)]; + return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE]; } /* These two functions just like the above two, except they access the Guest @@ -147,32 +101,12 @@ static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr) return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t); } -#ifdef CONFIG_X86_PAE -static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr) -{ - unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; - BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT)); - return gpage + pmd_index(vaddr) * sizeof(pmd_t); -} - -static unsigned long gpte_addr(struct lg_cpu *cpu, - pmd_t gpmd, unsigned long vaddr) -{ - unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT; - - BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT)); - return gpage + pte_index(vaddr) * sizeof(pte_t); -} -#else -static unsigned long gpte_addr(struct lg_cpu *cpu, - pgd_t gpgd, unsigned long vaddr) +static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr) { unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT; - BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT)); - return gpage + pte_index(vaddr) * sizeof(pte_t); + return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t); } -#endif /*:*/ /*M:014 get_pfn is slow: we could probably try to grab batches of pages here as @@ -237,7 +171,7 @@ static void release_pte(pte_t pte) /* Remember that get_user_pages_fast() took a reference to the page, in * get_pfn()? We have to put it back now. */ if (pte_flags(pte) & _PAGE_PRESENT) - put_page(pte_page(pte)); + put_page(pfn_to_page(pte_pfn(pte))); } /*:*/ @@ -250,20 +184,11 @@ static void check_gpte(struct lg_cpu *cpu, pte_t gpte) static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd) { - if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) || + if ((pgd_flags(gpgd) & ~_PAGE_TABLE) || (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) kill_guest(cpu, "bad page directory entry"); } -#ifdef CONFIG_X86_PAE -static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd) -{ - if ((pmd_flags(gpmd) & ~_PAGE_TABLE) || - (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) - kill_guest(cpu, "bad page middle directory entry"); -} -#endif - /*H:330 * (i) Looking up a page table entry when the Guest faults. * @@ -282,11 +207,6 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) pte_t gpte; pte_t *spte; -#ifdef CONFIG_X86_PAE - pmd_t *spmd; - pmd_t gpmd; -#endif - /* First step: get the top-level Guest page table entry. */ gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); /* Toplevel not present? We can't map it in. */ @@ -308,45 +228,12 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) check_gpgd(cpu, gpgd); /* And we copy the flags to the shadow PGD entry. The page * number in the shadow PGD is the page we just allocated. */ - set_pgd(spgd, __pgd(__pa(ptepage) | pgd_flags(gpgd))); + *spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd)); } -#ifdef CONFIG_X86_PAE - gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); - /* middle level not present? We can't map it in. 
*/ - if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) - return false; - - /* Now look at the matching shadow entry. */ - spmd = spmd_addr(cpu, *spgd, vaddr); - - if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) { - /* No shadow entry: allocate a new shadow PTE page. */ - unsigned long ptepage = get_zeroed_page(GFP_KERNEL); - - /* This is not really the Guest's fault, but killing it is - * simple for this corner case. */ - if (!ptepage) { - kill_guest(cpu, "out of memory allocating pte page"); - return false; - } - - /* We check that the Guest pmd is OK. */ - check_gpmd(cpu, gpmd); - - /* And we copy the flags to the shadow PMD entry. The page - * number in the shadow PMD is the page we just allocated. */ - native_set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd))); - } - - /* OK, now we look at the lower level in the Guest page table: keep its - * address, because we might update it later. */ - gpte_ptr = gpte_addr(cpu, gpmd, vaddr); -#else /* OK, now we look at the lower level in the Guest page table: keep its * address, because we might update it later. */ - gpte_ptr = gpte_addr(cpu, gpgd, vaddr); -#endif + gpte_ptr = gpte_addr(gpgd, vaddr); gpte = lgread(cpu, gpte_ptr, pte_t); /* If this page isn't in the Guest page tables, we can't page it in. */ @@ -372,7 +259,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) gpte = pte_mkdirty(gpte); /* Get the pointer to the shadow PTE entry we're going to set. */ - spte = spte_addr(cpu, *spgd, vaddr); + spte = spte_addr(*spgd, vaddr); /* If there was a valid shadow PTE entry here before, we release it. * This can happen with a write to a previously read-only entry. */ release_pte(*spte); @@ -386,7 +273,7 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) * table entry, even if the Guest says it's writable. That way * we will come back here when a write does actually occur, so * we can update the Guest's _PAGE_DIRTY flag. */ - native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0)); + *spte = gpte_to_spte(cpu, pte_wrprotect(gpte), 0); /* Finally, we write the Guest PTE entry back: we've set the * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */ @@ -414,23 +301,14 @@ static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) pgd_t *spgd; unsigned long flags; -#ifdef CONFIG_X86_PAE - pmd_t *spmd; -#endif /* Look at the current top level entry: is it present? */ spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr); if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) return false; -#ifdef CONFIG_X86_PAE - spmd = spmd_addr(cpu, *spgd, vaddr); - if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) - return false; -#endif - /* Check the flags on the pte entry itself: it must be present and * writable. */ - flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr))); + flags = pte_flags(*(spte_addr(*spgd, vaddr))); return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW); } @@ -444,43 +322,8 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr) kill_guest(cpu, "bad stack page %#lx", vaddr); } -#ifdef CONFIG_X86_PAE -static void release_pmd(pmd_t *spmd) -{ - /* If the entry's not present, there's nothing to release. */ - if (pmd_flags(*spmd) & _PAGE_PRESENT) { - unsigned int i; - pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT); - /* For each entry in the page, we might need to release it. */ - for (i = 0; i < PTRS_PER_PTE; i++) - release_pte(ptepage[i]); - /* Now we can free the page of PTEs */ - free_page((long)ptepage); - /* And zero out the PMD entry so we never release it twice. 
*/ - native_set_pmd(spmd, __pmd(0)); - } -} - -static void release_pgd(pgd_t *spgd) -{ - /* If the entry's not present, there's nothing to release. */ - if (pgd_flags(*spgd) & _PAGE_PRESENT) { - unsigned int i; - pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); - - for (i = 0; i < PTRS_PER_PMD; i++) - release_pmd(&pmdpage[i]); - - /* Now we can free the page of PMDs */ - free_page((long)pmdpage); - /* And zero out the PGD entry so we never release it twice. */ - set_pgd(spgd, __pgd(0)); - } -} - -#else /* !CONFIG_X86_PAE */ /*H:450 If we chase down the release_pgd() code, it looks like this: */ -static void release_pgd(pgd_t *spgd) +static void release_pgd(struct lguest *lg, pgd_t *spgd) { /* If the entry's not present, there's nothing to release. */ if (pgd_flags(*spgd) & _PAGE_PRESENT) { @@ -498,7 +341,7 @@ static void release_pgd(pgd_t *spgd) *spgd = __pgd(0); } } -#endif + /*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings() * hypercall and once in new_pgdir() when we re-used a top-level pgdir page. * It simply releases every PTE page from 0 up to the Guest's kernel address. */ @@ -507,7 +350,7 @@ static void flush_user_mappings(struct lguest *lg, int idx) unsigned int i; /* Release every pgd entry up to the kernel's address. */ for (i = 0; i < pgd_index(lg->kernel_address); i++) - release_pgd(lg->pgdirs[idx].pgdir + i); + release_pgd(lg, lg->pgdirs[idx].pgdir + i); } /*H:440 (v) Flushing (throwing away) page tables, @@ -526,9 +369,7 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) { pgd_t gpgd; pte_t gpte; -#ifdef CONFIG_X86_PAE - pmd_t gpmd; -#endif + /* First step: get the top-level Guest page table entry. */ gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); /* Toplevel not present? We can't map it in. */ @@ -537,14 +378,7 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) return -1UL; } -#ifdef CONFIG_X86_PAE - gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); - if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) - kill_guest(cpu, "Bad address %#lx", vaddr); - gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t); -#else - gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t); -#endif + gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t); if (!(pte_flags(gpte) & _PAGE_PRESENT)) kill_guest(cpu, "Bad address %#lx", vaddr); @@ -571,9 +405,6 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, int *blank_pgdir) { unsigned int next; -#ifdef CONFIG_X86_PAE - pmd_t *pmd_table; -#endif /* We pick one entry at random to throw out. Choosing the Least * Recently Used might be better, but this is easy. */ @@ -585,27 +416,10 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, /* If the allocation fails, just keep using the one we have */ if (!cpu->lg->pgdirs[next].pgdir) next = cpu->cpu_pgd; - else { -#ifdef CONFIG_X86_PAE - /* In PAE mode, allocate a pmd page and populate the - * last pgd entry. */ - pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL); - if (!pmd_table) { - free_page((long)cpu->lg->pgdirs[next].pgdir); - set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0)); - next = cpu->cpu_pgd; - } else { - set_pgd(cpu->lg->pgdirs[next].pgdir + - SWITCHER_PGD_INDEX, - __pgd(__pa(pmd_table) | _PAGE_PRESENT)); - /* This is a blank page, so there are no kernel - * mappings: caller must map the stack! */ - *blank_pgdir = 1; - } -#else + else + /* This is a blank page, so there are no kernel + * mappings: caller must map the stack! */ *blank_pgdir = 1; -#endif - } } /* Record which Guest toplevel this shadows. 
*/ cpu->lg->pgdirs[next].gpgdir = gpgdir; @@ -617,7 +431,7 @@ static unsigned int new_pgdir(struct lg_cpu *cpu, /*H:430 (iv) Switching page tables * - * Now we've seen all the page table setting and manipulation, let's see + * Now we've seen all the page table setting and manipulation, let's see what * what happens when the Guest changes page tables (ie. changes the top-level * pgdir). This occurs on almost every context switch. */ void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable) @@ -646,25 +460,10 @@ static void release_all_pagetables(struct lguest *lg) /* Every shadow pagetable this Guest has */ for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) - if (lg->pgdirs[i].pgdir) { -#ifdef CONFIG_X86_PAE - pgd_t *spgd; - pmd_t *pmdpage; - unsigned int k; - - /* Get the last pmd page. */ - spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX; - pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT); - - /* And release the pmd entries of that pmd page, - * except for the switcher pmd. */ - for (k = 0; k < SWITCHER_PMD_INDEX; k++) - release_pmd(&pmdpage[k]); -#endif + if (lg->pgdirs[i].pgdir) /* Every PGD entry except the Switcher at the top */ for (j = 0; j < SWITCHER_PGD_INDEX; j++) - release_pgd(lg->pgdirs[i].pgdir + j); - } + release_pgd(lg, lg->pgdirs[i].pgdir + j); } /* We also throw away everything when a Guest tells us it's changed a kernel @@ -705,37 +504,24 @@ static void do_set_pte(struct lg_cpu *cpu, int idx, { /* Look up the matching shadow page directory entry. */ pgd_t *spgd = spgd_addr(cpu, idx, vaddr); -#ifdef CONFIG_X86_PAE - pmd_t *spmd; -#endif /* If the top level isn't present, there's no entry to update. */ if (pgd_flags(*spgd) & _PAGE_PRESENT) { -#ifdef CONFIG_X86_PAE - spmd = spmd_addr(cpu, *spgd, vaddr); - if (pmd_flags(*spmd) & _PAGE_PRESENT) { -#endif - /* Otherwise, we start by releasing - * the existing entry. */ - pte_t *spte = spte_addr(cpu, *spgd, vaddr); - release_pte(*spte); - - /* If they're setting this entry as dirty or accessed, - * we might as well put that entry they've given us - * in now. This shaves 10% off a - * copy-on-write micro-benchmark. */ - if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) { - check_gpte(cpu, gpte); - native_set_pte(spte, - gpte_to_spte(cpu, gpte, - pte_flags(gpte) & _PAGE_DIRTY)); - } else - /* Otherwise kill it and we can demand_page() - * it in later. */ - native_set_pte(spte, __pte(0)); -#ifdef CONFIG_X86_PAE - } -#endif + /* Otherwise, we start by releasing the existing entry. */ + pte_t *spte = spte_addr(*spgd, vaddr); + release_pte(*spte); + + /* If they're setting this entry as dirty or accessed, we might + * as well put that entry they've given us in now. This shaves + * 10% off a copy-on-write micro-benchmark. */ + if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) { + check_gpte(cpu, gpte); + *spte = gpte_to_spte(cpu, gpte, + pte_flags(gpte) & _PAGE_DIRTY); + } else + /* Otherwise kill it and we can demand_page() it in + * later. */ + *spte = __pte(0); } } @@ -782,10 +568,12 @@ void guest_set_pte(struct lg_cpu *cpu, * * So with that in mind here's our code to to update a (top-level) PGD entry: */ -void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx) +void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx) { int pgdir; + /* The kernel seems to try to initialize this early on: we ignore its + * attempts to map over the Switcher. 
*/ if (idx >= SWITCHER_PGD_INDEX) return; @@ -793,14 +581,8 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx) pgdir = find_pgdir(lg, gpgdir); if (pgdir < ARRAY_SIZE(lg->pgdirs)) /* ... throw it away. */ - release_pgd(lg->pgdirs[pgdir].pgdir + idx); + release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx); } -#ifdef CONFIG_X86_PAE -void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx) -{ - guest_pagetable_clear_all(&lg->cpus[0]); -} -#endif /* Once we know how much memory we have we can construct simple identity * (which set virtual == physical) and linear mappings @@ -814,16 +596,8 @@ static unsigned long setup_pagetables(struct lguest *lg, { pgd_t __user *pgdir; pte_t __user *linear; + unsigned int mapped_pages, i, linear_pages, phys_linear; unsigned long mem_base = (unsigned long)lg->mem_base; - unsigned int mapped_pages, i, linear_pages; -#ifdef CONFIG_X86_PAE - pmd_t __user *pmds; - unsigned int j; - pgd_t pgd; - pmd_t pmd; -#else - unsigned int phys_linear; -#endif /* We have mapped_pages frames to map, so we need * linear_pages page tables to map them. */ @@ -836,9 +610,6 @@ static unsigned long setup_pagetables(struct lguest *lg, /* Now we use the next linear_pages pages as pte pages */ linear = (void *)pgdir - linear_pages * PAGE_SIZE; -#ifdef CONFIG_X86_PAE - pmds = (void *)linear - PAGE_SIZE; -#endif /* Linear mapping is easy: put every page's address into the * mapping in order. */ for (i = 0; i < mapped_pages; i++) { @@ -850,22 +621,6 @@ static unsigned long setup_pagetables(struct lguest *lg, /* The top level points to the linear page table pages above. * We setup the identity and linear mappings here. */ -#ifdef CONFIG_X86_PAE - for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD; - i += PTRS_PER_PTE, j++) { - native_set_pmd(&pmd, __pmd(((unsigned long)(linear + i) - - mem_base) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER)); - - if (copy_to_user(&pmds[j], &pmd, sizeof(pmd)) != 0) - return -EFAULT; - } - - set_pgd(&pgd, __pgd(((u32)pmds - mem_base) | _PAGE_PRESENT)); - if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0) - return -EFAULT; - if (copy_to_user(&pgdir[3], &pgd, sizeof(pgd)) != 0) - return -EFAULT; -#else phys_linear = (unsigned long)linear - mem_base; for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) { pgd_t pgd; @@ -878,7 +633,6 @@ static unsigned long setup_pagetables(struct lguest *lg, &pgd, sizeof(pgd))) return -EFAULT; } -#endif /* We return the top level (guest-physical) address: remember where * this is. */ @@ -894,10 +648,7 @@ int init_guest_pagetable(struct lguest *lg) u64 mem; u32 initrd_size; struct boot_params __user *boot = (struct boot_params *)lg->mem_base; -#ifdef CONFIG_X86_PAE - pgd_t *pgd; - pmd_t *pmd_table; -#endif + /* Get the Guest memory size and the ramdisk size from the boot header * located at lg->mem_base (Guest address 0). */ if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem)) @@ -912,15 +663,6 @@ int init_guest_pagetable(struct lguest *lg) lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL); if (!lg->pgdirs[0].pgdir) return -ENOMEM; -#ifdef CONFIG_X86_PAE - pgd = lg->pgdirs[0].pgdir; - pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL); - if (!pmd_table) - return -ENOMEM; - - set_pgd(pgd + SWITCHER_PGD_INDEX, - __pgd(__pa(pmd_table) | _PAGE_PRESENT)); -#endif lg->cpus[0].cpu_pgd = 0; return 0; } @@ -930,24 +672,17 @@ void page_table_guest_data_init(struct lg_cpu *cpu) { /* We get the kernel address: above this is all kernel memory. 
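 * (Style note on the condition below: get_user() and put_user() return
 * 0 on success and -EFAULT on a bad pointer, so OR-ing the calls
 * together performs each transfer in order and takes the kill_guest()
 * branch as soon as any one of them faults; a common kernel shorthand
 * for "do these copies, bail on the first error".)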
*/ if (get_user(cpu->lg->kernel_address, - &cpu->lg->lguest_data->kernel_address) - /* We tell the Guest that it can't use the top 2 or 4 MB - * of virtual addresses used by the Switcher. */ - || put_user(RESERVE_MEM * 1024 * 1024, - &cpu->lg->lguest_data->reserve_mem) - || put_user(cpu->lg->pgdirs[0].gpgdir, - &cpu->lg->lguest_data->pgdir)) + &cpu->lg->lguest_data->kernel_address) + /* We tell the Guest that it can't use the top 4MB of virtual + * addresses used by the Switcher. */ + || put_user(4U*1024*1024, &cpu->lg->lguest_data->reserve_mem) + || put_user(cpu->lg->pgdirs[0].gpgdir, &cpu->lg->lguest_data->pgdir)) kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); /* In flush_user_mappings() we loop from 0 to * "pgd_index(lg->kernel_address)". This assumes it won't hit the * Switcher mappings, so check that now. */ -#ifdef CONFIG_X86_PAE - if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX && - pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX) -#else if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX) -#endif kill_guest(cpu, "bad kernel address %#lx", cpu->lg->kernel_address); } @@ -973,30 +708,16 @@ void free_guest_pagetable(struct lguest *lg) void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) { pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages); + pgd_t switcher_pgd; pte_t regs_pte; unsigned long pfn; -#ifdef CONFIG_X86_PAE - pmd_t switcher_pmd; - pmd_t *pmd_table; - - native_set_pmd(&switcher_pmd, pfn_pmd(__pa(switcher_pte_page) >> - PAGE_SHIFT, PAGE_KERNEL_EXEC)); - - pmd_table = __va(pgd_pfn(cpu->lg-> - pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX]) - << PAGE_SHIFT); - native_set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd); -#else - pgd_t switcher_pgd; - /* Make the last PGD entry for this Guest point to the Switcher's PTE * page for this CPU (with appropriate flags). */ - switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC); + switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL); cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd; -#endif /* We also change the Switcher PTE page. When we're running the Guest, * we want the Guest's "regs" page to appear where the first Switcher * page for this CPU is. This is an optimization: when the Switcher @@ -1005,9 +726,8 @@ void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages) * page is already mapped there, we don't have to copy them out * again. */ pfn = __pa(cpu->regs_page) >> PAGE_SHIFT; - native_set_pte(®s_pte, pfn_pte(pfn, PAGE_KERNEL)); - native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], - regs_pte); + regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL)); + switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte; } /*:*/ @@ -1032,21 +752,21 @@ static __init void populate_switcher_pte_page(unsigned int cpu, /* The first entries are easy: they map the Switcher code. */ for (i = 0; i < pages; i++) { - native_set_pte(&pte[i], mk_pte(switcher_page[i], - __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED))); + pte[i] = mk_pte(switcher_page[i], + __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)); } /* The only other thing we map is this CPU's pair of pages. 
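
The open-coded index in map_switcher_in_guest() above, (unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE, is simply "which slot of a pte page maps this virtual address". As a small sketch, assuming 4 KB pages and 1024 entries per pte page:

    #include <stdint.h>

    #define PAGE_SIZE    4096u
    #define PTRS_PER_PTE 1024u

    /* Page number within the region covered by a single pte page. */
    static unsigned int pte_index_of(uintptr_t vaddr)
    {
        return (vaddr / PAGE_SIZE) % PTRS_PER_PTE;
    }
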
*/ i = pages + cpu*2; /* First page (Guest registers) is writable from the Guest */ - native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]), - __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW))); + pte[i] = pfn_pte(page_to_pfn(switcher_page[i]), + __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)); /* The second page contains the "struct lguest_ro_state", and is * read-only. */ - native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]), - __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED))); + pte[i+1] = pfn_pte(page_to_pfn(switcher_page[i+1]), + __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)); } /* We've made it through the page table code. Perhaps our tired brains are diff --git a/trunk/drivers/lguest/segments.c b/trunk/drivers/lguest/segments.c index 482ed5a18750..7ede64ffeef9 100644 --- a/trunk/drivers/lguest/segments.c +++ b/trunk/drivers/lguest/segments.c @@ -150,7 +150,7 @@ void load_guest_gdt_entry(struct lg_cpu *cpu, u32 num, u32 lo, u32 hi) { /* We assume the Guest has the same number of GDT entries as the * Host, otherwise we'd have to dynamically allocate the Guest GDT. */ - if (num >= ARRAY_SIZE(cpu->arch.gdt)) + if (num > ARRAY_SIZE(cpu->arch.gdt)) kill_guest(cpu, "too many gdt entries %i", num); /* Set it up, then fix it. */ diff --git a/trunk/drivers/message/fusion/mptbase.c b/trunk/drivers/message/fusion/mptbase.c index 44b931504457..5d496a99e034 100644 --- a/trunk/drivers/message/fusion/mptbase.c +++ b/trunk/drivers/message/fusion/mptbase.c @@ -146,6 +146,7 @@ static MPT_EVHANDLER MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS]; static MPT_RESETHANDLER MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS]; static struct mpt_pci_driver *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS]; +static DECLARE_WAIT_QUEUE_HEAD(mpt_waitq); /* * Driver Callback Index's @@ -158,8 +159,7 @@ static u8 last_drv_idx; * Forward protos... 
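
The segments.c hunk above reverts the GDT bounds check from >= back to >. Note what that gives up: with >, num == ARRAY_SIZE(cpu->arch.gdt) passes the test yet indexes one slot past the end of the array, so the >= form being reverted away was the off-by-one fix. A two-line illustration with a stand-in array size:

    #include <stdio.h>

    #define GDT_ENTRIES 32              /* stand-in for ARRAY_SIZE(cpu->arch.gdt) */
    static int gdt[GDT_ENTRIES];

    int main(void)
    {
        unsigned int num = GDT_ENTRIES; /* first invalid index */
        if (num > GDT_ENTRIES)          /* reverted check: does NOT fire */
            return 1;
        /* gdt[num] = 0; here would write one past the end */
        printf("num=%u slipped past the '>' check\n", num);
        return 0;
    }
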
*/ static irqreturn_t mpt_interrupt(int irq, void *bus_id); -static int mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, - MPT_FRAME_HDR *reply); +static int mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply); static int mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req, int replyBytes, u16 *u16reply, int maxwait, int sleepFlag); @@ -190,9 +190,9 @@ static int mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum); static int mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum); static void mpt_read_ioc_pg_1(MPT_ADAPTER *ioc); static void mpt_read_ioc_pg_4(MPT_ADAPTER *ioc); +static void mpt_timer_expired(unsigned long data); static void mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc); -static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, - int sleepFlag); +static int SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch); static int SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp); static int mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag); static int mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init); @@ -207,8 +207,8 @@ static int procmpt_iocinfo_read(char *buf, char **start, off_t offset, #endif static void mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc); -static int ProcessEventNotification(MPT_ADAPTER *ioc, - EventNotificationReply_t *evReply, int *evHandlers); +//int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag); +static int ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply, int *evHandlers); static void mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf); static void mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info); static void mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info); @@ -276,56 +276,6 @@ mpt_get_cb_idx(MPT_DRIVER_CLASS dclass) return 0; } -/** - * mpt_is_discovery_complete - determine if discovery has completed - * @ioc: per adatper instance - * - * Returns 1 when discovery completed, else zero. 
- */ -static int -mpt_is_discovery_complete(MPT_ADAPTER *ioc) -{ - ConfigExtendedPageHeader_t hdr; - CONFIGPARMS cfg; - SasIOUnitPage0_t *buffer; - dma_addr_t dma_handle; - int rc = 0; - - memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t)); - memset(&cfg, 0, sizeof(CONFIGPARMS)); - hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION; - hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; - hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; - cfg.cfghdr.ehdr = &hdr; - cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; - - if ((mpt_config(ioc, &cfg))) - goto out; - if (!hdr.ExtPageLength) - goto out; - - buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4, - &dma_handle); - if (!buffer) - goto out; - - cfg.physAddr = dma_handle; - cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; - - if ((mpt_config(ioc, &cfg))) - goto out_free_consistent; - - if (!(buffer->PhyData[0].PortFlags & - MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS)) - rc = 1; - - out_free_consistent: - pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4, - buffer, dma_handle); - out: - return rc; -} - /** * mpt_fault_reset_work - work performed on workq after ioc fault * @work: input argument, used to derive ioc @@ -340,7 +290,7 @@ mpt_fault_reset_work(struct work_struct *work) int rc; unsigned long flags; - if (ioc->ioc_reset_in_progress || !ioc->active) + if (ioc->diagPending || !ioc->active) goto out; ioc_raw_state = mpt_GetIocState(ioc, 0); @@ -357,12 +307,6 @@ mpt_fault_reset_work(struct work_struct *work) printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after " "reset (%04xh)\n", ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK); - } else if (ioc->bus_type == SAS && ioc->sas_discovery_quiesce_io) { - if ((mpt_is_discovery_complete(ioc))) { - devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "clearing " - "discovery_quiesce_io flag\n", ioc->name)); - ioc->sas_discovery_quiesce_io = 0; - } } out: @@ -373,11 +317,11 @@ mpt_fault_reset_work(struct work_struct *work) ioc = ioc->alt_ioc; /* rearm the timer */ - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); + spin_lock_irqsave(&ioc->fault_reset_work_lock, flags); if (ioc->reset_work_q) queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work, msecs_to_jiffies(MPT_POLLING_INTERVAL)); - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags); } @@ -557,9 +501,9 @@ mpt_interrupt(int irq, void *bus_id) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mptbase_reply - MPT base driver's callback routine + * mpt_base_reply - MPT base driver's callback routine * @ioc: Pointer to MPT_ADAPTER structure - * @req: Pointer to original MPT request frame + * @mf: Pointer to original MPT request frame * @reply: Pointer to MPT reply frame (NULL if TurboReply) * * MPT base driver's callback routine; all base driver @@ -570,49 +514,122 @@ mpt_interrupt(int irq, void *bus_id) * should be freed, or 0 if it shouldn't. 
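
mpt_fault_reset_work() above decides everything from a single doorbell read: the top nibble carries the IOC state, the low 16 bits the fault code. A compilable model of the decode; the constants are copied from the MPI headers as I remember them, so treat them as illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define MPI_IOC_STATE_MASK     0xF0000000u
    #define MPI_IOC_STATE_FAULT    0x40000000u
    #define MPI_DOORBELL_DATA_MASK 0x0000FFFFu

    int main(void)
    {
        uint32_t ioc_raw_state = 0x40001234;  /* sample doorbell value */

        if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT)
            printf("IOC is in FAULT state (%04xh)\n",
                   ioc_raw_state & MPI_DOORBELL_DATA_MASK);
        return 0;
    }
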
*/ static int -mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) +mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply) { - EventNotificationReply_t *pEventReply; - u8 event; - int evHandlers; int freereq = 1; + u8 func; - switch (reply->u.hdr.Function) { - case MPI_FUNCTION_EVENT_NOTIFICATION: - pEventReply = (EventNotificationReply_t *)reply; - evHandlers = 0; - ProcessEventNotification(ioc, pEventReply, &evHandlers); - event = le32_to_cpu(pEventReply->Event) & 0xFF; - if (pEventReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) - freereq = 0; - if (event != MPI_EVENT_EVENT_CHANGE) - break; - case MPI_FUNCTION_CONFIG: - case MPI_FUNCTION_SAS_IO_UNIT_CONTROL: - ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; - if (reply) { - ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID; - memcpy(ioc->mptbase_cmds.reply, reply, - min(MPT_DEFAULT_FRAME_SIZE, - 4 * reply->u.reply.MsgLength)); + dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply() called\n", ioc->name)); +#ifdef CONFIG_FUSION_LOGGING + if ((ioc->debug_level & MPT_DEBUG_MSG_FRAME) && + !(reply->u.hdr.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) { + dmfprintk(ioc, printk(MYIOC_s_INFO_FMT ": Original request frame (@%p) header\n", + ioc->name, mf)); + DBG_DUMP_REQUEST_FRAME_HDR(ioc, (u32 *)mf); + } +#endif + + func = reply->u.hdr.Function; + dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply, Function=%02Xh\n", + ioc->name, func)); + + if (func == MPI_FUNCTION_EVENT_NOTIFICATION) { + EventNotificationReply_t *pEvReply = (EventNotificationReply_t *) reply; + int evHandlers = 0; + int results; + + results = ProcessEventNotification(ioc, pEvReply, &evHandlers); + if (results != evHandlers) { + /* CHECKME! Any special handling needed here? */ + devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "Called %d event handlers, sum results = %d\n", + ioc->name, evHandlers, results)); } - if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) { - ioc->mptbase_cmds.status &= ~MPT_MGMT_STATUS_PENDING; - complete(&ioc->mptbase_cmds.done); - } else + + /* + * Hmmm... It seems that EventNotificationReply is an exception + * to the rule of one reply per request. 
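
The exception noted above, several event replies answering one request, comes down to a single flag bit: as long as MPI_MSGFLAGS_CONTINUATION_REPLY is set, the request frame must stay alive. Reduced to a predicate; the 0x80 value is my recollection of the MPI header, not taken from this patch:

    #include <stdint.h>

    #define MPI_MSGFLAGS_CONTINUATION_REPLY 0x80

    /* Only the final reply of a chain, with the continuation bit clear,
     * lets the driver return the request frame to the FreeQ. */
    static int should_free_request(uint8_t msg_flags)
    {
        return !(msg_flags & MPI_MSGFLAGS_CONTINUATION_REPLY);
    }
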
+ */ + if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) { freereq = 0; - if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_FREE_MF) - freereq = 1; - break; - case MPI_FUNCTION_EVENT_ACK: - devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "EventAck reply received\n", ioc->name)); - break; - default: - printk(MYIOC_s_ERR_FMT - "Unexpected msg function (=%02Xh) reply received!\n", - ioc->name, reply->u.hdr.Function); - break; + } else { + devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n", + ioc->name, pEvReply)); + } + +#ifdef CONFIG_PROC_FS +// LogEvent(ioc, pEvReply); +#endif + + } else if (func == MPI_FUNCTION_EVENT_ACK) { + dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply, EventAck reply received\n", + ioc->name)); + } else if (func == MPI_FUNCTION_CONFIG) { + CONFIGPARMS *pCfg; + unsigned long flags; + + dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "config_complete (mf=%p,mr=%p)\n", + ioc->name, mf, reply)); + + pCfg = * ((CONFIGPARMS **)((u8 *) mf + ioc->req_sz - sizeof(void *))); + + if (pCfg) { + /* disable timer and remove from linked list */ + del_timer(&pCfg->timer); + + spin_lock_irqsave(&ioc->FreeQlock, flags); + list_del(&pCfg->linkage); + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + + /* + * If IOC Status is SUCCESS, save the header + * and set the status code to GOOD. + */ + pCfg->status = MPT_CONFIG_ERROR; + if (reply) { + ConfigReply_t *pReply = (ConfigReply_t *)reply; + u16 status; + + status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK; + dcprintk(ioc, printk(MYIOC_s_NOTE_FMT " IOCStatus=%04xh, IOCLogInfo=%08xh\n", + ioc->name, status, le32_to_cpu(pReply->IOCLogInfo))); + + pCfg->status = status; + if (status == MPI_IOCSTATUS_SUCCESS) { + if ((pReply->Header.PageType & + MPI_CONFIG_PAGETYPE_MASK) == + MPI_CONFIG_PAGETYPE_EXTENDED) { + pCfg->cfghdr.ehdr->ExtPageLength = + le16_to_cpu(pReply->ExtPageLength); + pCfg->cfghdr.ehdr->ExtPageType = + pReply->ExtPageType; + } + pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion; + + /* If this is a regular header, save PageLength. */ + /* LMP Do this better so not using a reserved field! */ + pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength; + pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber; + pCfg->cfghdr.hdr->PageType = pReply->Header.PageType; + } + } + + /* + * Wake up the original calling thread + */ + pCfg->wait_done = 1; + wake_up(&mpt_waitq); + } + } else if (func == MPI_FUNCTION_SAS_IO_UNIT_CONTROL) { + /* we should be always getting a reply frame */ + memcpy(ioc->persist_reply_frame, reply, + min(MPT_DEFAULT_FRAME_SIZE, + 4*reply->u.reply.MsgLength)); + del_timer(&ioc->persist_timer); + ioc->persist_wait_done = 1; + wake_up(&mpt_waitq); + } else { + printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n", + ioc->name, func); } /* @@ -971,139 +988,41 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf) /* Put Request back on FreeQ! 
*/ spin_lock_irqsave(&ioc->FreeQlock, flags); - if (cpu_to_le32(mf->u.frame.linkage.arg1) == 0xdeadbeaf) - goto out; - /* signature to know if this mf is freed */ - mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf); + mf->u.frame.linkage.arg1 = 0xdeadbeaf; /* signature to know if this mf is freed */ list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ); #ifdef MFCNT ioc->mfcnt--; #endif - out: spin_unlock_irqrestore(&ioc->FreeQlock, flags); } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mpt_add_sge - Place a simple 32 bit SGE at address pAddr. - * @pAddr: virtual address for SGE - * @flagslength: SGE flags and data transfer length - * @dma_addr: Physical address - * - * This routine places a MPT request frame back on the MPT adapter's - * FreeQ. - */ -static void -mpt_add_sge(void *pAddr, u32 flagslength, dma_addr_t dma_addr) -{ - SGESimple32_t *pSge = (SGESimple32_t *) pAddr; - pSge->FlagsLength = cpu_to_le32(flagslength); - pSge->Address = cpu_to_le32(dma_addr); -} - -/** - * mpt_add_sge_64bit - Place a simple 64 bit SGE at address pAddr. + * mpt_add_sge - Place a simple SGE at address pAddr. * @pAddr: virtual address for SGE * @flagslength: SGE flags and data transfer length * @dma_addr: Physical address * * This routine places a MPT request frame back on the MPT adapter's * FreeQ. - **/ -static void -mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr) -{ - SGESimple64_t *pSge = (SGESimple64_t *) pAddr; - pSge->Address.Low = cpu_to_le32 - (lower_32_bits((unsigned long)(dma_addr))); - pSge->Address.High = cpu_to_le32 - (upper_32_bits((unsigned long)dma_addr)); - pSge->FlagsLength = cpu_to_le32 - ((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING)); -} - -/** - * mpt_add_sge_64bit_1078 - Place a simple 64 bit SGE at address pAddr - * (1078 workaround). - * @pAddr: virtual address for SGE - * @flagslength: SGE flags and data transfer length - * @dma_addr: Physical address - * - * This routine places a MPT request frame back on the MPT adapter's - * FreeQ. - **/ -static void -mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr) -{ - SGESimple64_t *pSge = (SGESimple64_t *) pAddr; - u32 tmp; - - pSge->Address.Low = cpu_to_le32 - (lower_32_bits((unsigned long)(dma_addr))); - tmp = (u32)(upper_32_bits((unsigned long)dma_addr)); - - /* - * 1078 errata workaround for the 36GB limitation - */ - if ((((u64)dma_addr + MPI_SGE_LENGTH(flagslength)) >> 32) == 9) { - flagslength |= - MPI_SGE_SET_FLAGS(MPI_SGE_FLAGS_LOCAL_ADDRESS); - tmp |= (1<<31); - if (mpt_debug_level & MPT_DEBUG_36GB_MEM) - printk(KERN_DEBUG "1078 P0M2 addressing for " - "addr = 0x%llx len = %d\n", - (unsigned long long)dma_addr, - MPI_SGE_LENGTH(flagslength)); - } - - pSge->Address.High = cpu_to_le32(tmp); - pSge->FlagsLength = cpu_to_le32( - (flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING)); -} - -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mpt_add_chain - Place a 32 bit chain SGE at address pAddr. 
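
The guard being dropped in mpt_free_msg_frame() above used 0xdeadbeaf (the driver's spelling) in linkage.arg1 as an "already freed" signature, turning a double free into a no-op instead of putting the same frame on the FreeQ twice. The pattern, minus the locking and list plumbing:

    #include <stdint.h>

    #define MF_FREED_SIG 0xdeadbeafu  /* driver's spelling, not 0xdeadbeef */

    struct msg_frame { uint32_t arg1; };

    static int free_frame_once(struct msg_frame *mf)
    {
        if (mf->arg1 == MF_FREED_SIG)
            return -1;            /* already on the FreeQ: do nothing */
        mf->arg1 = MF_FREED_SIG;  /* mark it, then list_add_tail() in the driver */
        return 0;
    }
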
- * @pAddr: virtual address for SGE - * @next: nextChainOffset value (u32's) - * @length: length of next SGL segment - * @dma_addr: Physical address - * - */ -static void -mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr) -{ - SGEChain32_t *pChain = (SGEChain32_t *) pAddr; - pChain->Length = cpu_to_le16(length); - pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; - pChain->NextChainOffset = next; - pChain->Address = cpu_to_le32(dma_addr); -} - -/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -/** - * mpt_add_chain_64bit - Place a 64 bit chain SGE at address pAddr. - * @pAddr: virtual address for SGE - * @next: nextChainOffset value (u32's) - * @length: length of next SGL segment - * @dma_addr: Physical address - * */ -static void -mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr) +void +mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr) { - SGEChain64_t *pChain = (SGEChain64_t *) pAddr; + if (sizeof(dma_addr_t) == sizeof(u64)) { + SGESimple64_t *pSge = (SGESimple64_t *) pAddr; u32 tmp = dma_addr & 0xFFFFFFFF; - pChain->Length = cpu_to_le16(length); - pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT | - MPI_SGE_FLAGS_64_BIT_ADDRESSING); - - pChain->NextChainOffset = next; + pSge->FlagsLength = cpu_to_le32(flagslength); + pSge->Address.Low = cpu_to_le32(tmp); + tmp = (u32) ((u64)dma_addr >> 32); + pSge->Address.High = cpu_to_le32(tmp); - pChain->Address.Low = cpu_to_le32(tmp); - tmp = (u32)(upper_32_bits((unsigned long)dma_addr)); - pChain->Address.High = cpu_to_le32(tmp); + } else { + SGESimple32_t *pSge = (SGESimple32_t *) pAddr; + pSge->FlagsLength = cpu_to_le32(flagslength); + pSge->Address = cpu_to_le32(dma_addr); + } } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -1306,7 +1225,7 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init) } flags_length = flags_length << MPI_SGE_FLAGS_SHIFT; flags_length |= ioc->HostPageBuffer_sz; - ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma); + mpt_add_sge(psge, flags_length, ioc->HostPageBuffer_dma); ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE; return 0; @@ -1615,42 +1534,21 @@ mpt_mapresources(MPT_ADAPTER *ioc) pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); - if (sizeof(dma_addr_t) > 4) { - const uint64_t required_mask = dma_get_required_mask - (&pdev->dev); - if (required_mask > DMA_BIT_MASK(32) - && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) - && !pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(64))) { - ioc->dma_mask = DMA_BIT_MASK(64); - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT - ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", - ioc->name)); - } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) - && !pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(32))) { - ioc->dma_mask = DMA_BIT_MASK(32); - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT - ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", - ioc->name)); - } else { - printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n", - ioc->name, pci_name(pdev)); - return r; - } + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) + && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + dinitprintk(ioc, printk(MYIOC_s_INFO_FMT + ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", + ioc->name)); + } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) + && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { + dinitprintk(ioc, printk(MYIOC_s_INFO_FMT + ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", + ioc->name)); } else { - if (!pci_set_dma_mask(pdev, 
DMA_BIT_MASK(32)) - && !pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(32))) { - ioc->dma_mask = DMA_BIT_MASK(32); - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT - ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n", - ioc->name)); - } else { - printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n", - ioc->name, pci_name(pdev)); - return r; - } + printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n", + ioc->name, pci_name(pdev)); + pci_release_selected_regions(pdev, ioc->bars); + return r; } mem_phys = msize = 0; @@ -1734,7 +1632,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) ioc->id = mpt_ids++; sprintf(ioc->name, "ioc%d", ioc->id); - dinitprintk(ioc, printk(KERN_WARNING MYNAM ": mpt_adapter_install\n")); /* * set initial debug level @@ -1753,36 +1650,14 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) return r; } - /* - * Setting up proper handlers for scatter gather handling - */ - if (ioc->dma_mask == DMA_BIT_MASK(64)) { - if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) - ioc->add_sge = &mpt_add_sge_64bit_1078; - else - ioc->add_sge = &mpt_add_sge_64bit; - ioc->add_chain = &mpt_add_chain_64bit; - ioc->sg_addr_size = 8; - } else { - ioc->add_sge = &mpt_add_sge; - ioc->add_chain = &mpt_add_chain; - ioc->sg_addr_size = 4; - } - ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size; - ioc->alloc_total = sizeof(MPT_ADAPTER); ioc->req_sz = MPT_DEFAULT_FRAME_SIZE; /* avoid div by zero! */ ioc->reply_sz = MPT_REPLY_FRAME_SIZE; ioc->pcidev = pdev; - - spin_lock_init(&ioc->taskmgmt_lock); - mutex_init(&ioc->internal_cmds.mutex); - init_completion(&ioc->internal_cmds.done); - mutex_init(&ioc->mptbase_cmds.mutex); - init_completion(&ioc->mptbase_cmds.done); - mutex_init(&ioc->taskmgmt_cmds.mutex); - init_completion(&ioc->taskmgmt_cmds.done); + ioc->diagPending = 0; + spin_lock_init(&ioc->diagLock); + spin_lock_init(&ioc->initializing_hba_lock); /* Initialize the event logging. */ @@ -1795,13 +1670,16 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) ioc->mfcnt = 0; #endif - ioc->sh = NULL; ioc->cached_fw = NULL; /* Initilize SCSI Config Data structure */ memset(&ioc->spi_data, 0, sizeof(SpiCfgData)); + /* Initialize the running configQ head. + */ + INIT_LIST_HEAD(&ioc->configQ); + /* Initialize the fc rport list head. 
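
The unified mpt_add_sge() restored a couple of hunks above picks the SGE layout from sizeof(dma_addr_t) and, in the 64-bit case, splits the DMA address into Low/High words. The split in isolation; the real code additionally wraps each word in cpu_to_le32():

    #include <stdint.h>

    struct sge64 { uint32_t flags_length, addr_low, addr_high; };

    static void fill_sge64(struct sge64 *sge, uint32_t flagslength,
                           uint64_t dma)
    {
        sge->flags_length = flagslength;
        sge->addr_low     = (uint32_t)(dma & 0xFFFFFFFFu);
        sge->addr_high    = (uint32_t)(dma >> 32);
    }
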
*/ INIT_LIST_HEAD(&ioc->fc_rports); @@ -1812,8 +1690,9 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) /* Initialize workqueue */ INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work); + spin_lock_init(&ioc->fault_reset_work_lock); - snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN, + snprintf(ioc->reset_work_q_name, sizeof(ioc->reset_work_q_name), "mpt_poll_%d", ioc->id); ioc->reset_work_q = create_singlethread_workqueue(ioc->reset_work_q_name); @@ -1888,14 +1767,11 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) case MPI_MANUFACTPAGE_DEVID_SAS1064: case MPI_MANUFACTPAGE_DEVID_SAS1068: ioc->errata_flag_1064 = 1; - ioc->bus_type = SAS; - break; case MPI_MANUFACTPAGE_DEVID_SAS1064E: case MPI_MANUFACTPAGE_DEVID_SAS1068E: case MPI_MANUFACTPAGE_DEVID_SAS1078: ioc->bus_type = SAS; - break; } @@ -1937,11 +1813,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) */ mpt_detect_bound_ports(ioc, pdev); - INIT_LIST_HEAD(&ioc->fw_event_list); - spin_lock_init(&ioc->fw_event_lock); - snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id); - ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name); - if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP, CAN_SLEEP)) != 0){ printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n", @@ -2014,18 +1885,13 @@ mpt_detach(struct pci_dev *pdev) /* * Stop polling ioc for fault condition */ - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); + spin_lock_irqsave(&ioc->fault_reset_work_lock, flags); wq = ioc->reset_work_q; ioc->reset_work_q = NULL; - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags); cancel_delayed_work(&ioc->fault_reset_work); destroy_workqueue(wq); - spin_lock_irqsave(&ioc->fw_event_lock, flags); - wq = ioc->fw_event_q; - ioc->fw_event_q = NULL; - spin_unlock_irqrestore(&ioc->fw_event_lock, flags); - destroy_workqueue(wq); sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name); remove_proc_entry(pname, NULL); @@ -2128,21 +1994,6 @@ mpt_resume(struct pci_dev *pdev) if (err) return err; - if (ioc->dma_mask == DMA_BIT_MASK(64)) { - if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) - ioc->add_sge = &mpt_add_sge_64bit_1078; - else - ioc->add_sge = &mpt_add_sge_64bit; - ioc->add_chain = &mpt_add_chain_64bit; - ioc->sg_addr_size = 8; - } else { - - ioc->add_sge = &mpt_add_sge; - ioc->add_chain = &mpt_add_chain; - ioc->sg_addr_size = 4; - } - ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size; - printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n", ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT), CHIPREG_READ32(&ioc->chip->Doorbell)); @@ -2240,16 +2091,12 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) ioc->active = 0; if (ioc->alt_ioc) { - if (ioc->alt_ioc->active || - reason == MPT_HOSTEVENT_IOC_RECOVER) { + if (ioc->alt_ioc->active) reset_alt_ioc_active = 1; - /* Disable alt-IOC's reply interrupts - * (and FreeQ) for a bit - **/ - CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, - 0xFFFFFFFF); - ioc->alt_ioc->active = 0; - } + + /* Disable alt-IOC's reply interrupts (and FreeQ) for a bit ... 
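
mpt_detach() above shows the teardown idiom for the polling workqueue: NULL the pointer under the same lock the rearm path takes, so nothing can requeue after this point, then cancel and destroy outside the lock. The shape of it, modelled with a pthread mutex standing in for the spinlock:

    #include <stddef.h>
    #include <pthread.h>

    struct poller {
        pthread_mutex_t lock;
        void *reset_work_q;  /* stand-in for the workqueue pointer */
    };

    static void *steal_work_q(struct poller *p)
    {
        void *wq;
        pthread_mutex_lock(&p->lock);
        wq = p->reset_work_q;
        p->reset_work_q = NULL;  /* rearm path now sees NULL and backs off */
        pthread_mutex_unlock(&p->lock);
        return wq;               /* caller: cancel_delayed_work + destroy */
    }
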
*/ + CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, 0xFFFFFFFF); + ioc->alt_ioc->active = 0; } hard = 1; @@ -2270,11 +2117,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) } } else { - printk(MYIOC_s_WARN_FMT - "NOT READY WARNING!\n", ioc->name); + printk(MYIOC_s_WARN_FMT "NOT READY!\n", ioc->name); } - ret = -1; - goto out; + return -1; } /* hard_reset_done = 0 if a soft reset was performed @@ -2284,9 +2129,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0) alt_ioc_ready = 1; else - printk(MYIOC_s_WARN_FMT - ": alt-ioc Not ready WARNING!\n", - ioc->alt_ioc->name); + printk(MYIOC_s_WARN_FMT "alt_ioc not ready!\n", ioc->alt_ioc->name); } for (ii=0; ii<5; ii++) { @@ -2307,8 +2150,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) if (alt_ioc_ready) { if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) { dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "Initial Alt IocFacts failed rc=%x\n", - ioc->name, rc)); + "Initial Alt IocFacts failed rc=%x\n", ioc->name, rc)); /* Retry - alt IOC was initialized once */ rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason); @@ -2352,20 +2194,16 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) IRQF_SHARED, ioc->name, ioc); if (rc < 0) { printk(MYIOC_s_ERR_FMT "Unable to allocate " - "interrupt %d!\n", - ioc->name, ioc->pcidev->irq); + "interrupt %d!\n", ioc->name, ioc->pcidev->irq); if (ioc->msi_enable) pci_disable_msi(ioc->pcidev); - ret = -EBUSY; - goto out; + return -EBUSY; } irq_allocated = 1; ioc->pci_irq = ioc->pcidev->irq; pci_set_master(ioc->pcidev); /* ?? */ - pci_set_drvdata(ioc->pcidev, ioc); - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT - "installed at interrupt %d\n", ioc->name, - ioc->pcidev->irq)); + dprintk(ioc, printk(MYIOC_s_INFO_FMT "installed at interrupt " + "%d\n", ioc->name, ioc->pcidev->irq)); } } @@ -2374,22 +2212,17 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) * init as upper addresses are needed for init. * If fails, continue with alt-ioc processing */ - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "PrimeIocFifos\n", - ioc->name)); if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0)) ret = -3; /* May need to check/upload firmware & data here! * If fails, continue with alt-ioc processing */ - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "SendIocInit\n", - ioc->name)); if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0)) ret = -4; // NEW! if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) { - printk(MYIOC_s_WARN_FMT - ": alt-ioc (%d) FIFO mgmt alloc WARNING!\n", + printk(MYIOC_s_WARN_FMT ": alt_ioc (%d) FIFO mgmt alloc!\n", ioc->alt_ioc->name, rc); alt_ioc_ready = 0; reset_alt_ioc_active = 0; @@ -2399,9 +2232,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) { alt_ioc_ready = 0; reset_alt_ioc_active = 0; - printk(MYIOC_s_WARN_FMT - ": alt-ioc: (%d) init failure WARNING!\n", - ioc->alt_ioc->name, rc); + printk(MYIOC_s_WARN_FMT "alt_ioc (%d) init failure!\n", + ioc->alt_ioc->name, rc); } } @@ -2437,36 +2269,28 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) } } - /* Enable MPT base driver management of EventNotification - * and EventAck handling. 
- */ - if ((ret == 0) && (!ioc->facts.EventState)) { - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT - "SendEventNotification\n", - ioc->name)); - ret = SendEventNotification(ioc, 1, sleepFlag); /* 1=Enable */ - } - - if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState) - rc = SendEventNotification(ioc->alt_ioc, 1, sleepFlag); - if (ret == 0) { /* Enable! (reply interrupt) */ CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM); ioc->active = 1; } - if (rc == 0) { /* alt ioc */ - if (reset_alt_ioc_active && ioc->alt_ioc) { - /* (re)Enable alt-IOC! (reply interrupt) */ - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "alt-ioc" - "reply irq re-enabled\n", - ioc->alt_ioc->name)); - CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, - MPI_HIM_DIM); - ioc->alt_ioc->active = 1; - } + + if (reset_alt_ioc_active && ioc->alt_ioc) { + /* (re)Enable alt-IOC! (reply interrupt) */ + dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "alt_ioc reply irq re-enabled\n", + ioc->alt_ioc->name)); + CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, MPI_HIM_DIM); + ioc->alt_ioc->active = 1; } + /* Enable MPT base driver management of EventNotification + * and EventAck handling. + */ + if ((ret == 0) && (!ioc->facts.EventState)) + (void) SendEventNotification(ioc, 1); /* 1=Enable EventNotification */ + + if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState) + (void) SendEventNotification(ioc->alt_ioc, 1); /* 1=Enable EventNotification */ /* Add additional "reason" check before call to GetLanConfigPages * (combined with GetIoUnitPage2 call). This prevents a somewhat @@ -2482,9 +2306,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) mutex_init(&ioc->raid_data.inactive_list_mutex); INIT_LIST_HEAD(&ioc->raid_data.inactive_list); - switch (ioc->bus_type) { + if (ioc->bus_type == SAS) { - case SAS: /* clear persistency table */ if(ioc->facts.IOCExceptions & MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) { @@ -2498,15 +2321,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) */ mpt_findImVolumes(ioc); - /* Check, and possibly reset, the coalescing value - */ - mpt_read_ioc_pg_1(ioc); - - break; - - case FC: - if ((ioc->pfacts[0].ProtocolFlags & - MPI_PORTFACTS_PROTOCOL_LAN) && + } else if (ioc->bus_type == FC) { + if ((ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) && (ioc->lan_cnfg_page0.Header.PageLength == 0)) { /* * Pre-fetch the ports LAN MAC address! 
@@ -2515,14 +2331,11 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) (void) GetLanConfigPages(ioc); a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow; dprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "LanAddr = %02X:%02X:%02X" - ":%02X:%02X:%02X\n", - ioc->name, a[5], a[4], - a[3], a[2], a[1], a[0])); - } - break; + "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n", + ioc->name, a[5], a[4], a[3], a[2], a[1], a[0])); - case SPI: + } + } else { /* Get NVRAM and adapter maximums from SPP 0 and 2 */ mpt_GetScsiPortSettings(ioc, 0); @@ -2541,8 +2354,6 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag) mpt_read_ioc_pg_1(ioc); mpt_read_ioc_pg_4(ioc); - - break; } GetIoUnitPage2(ioc); @@ -2624,20 +2435,16 @@ mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev) if (_pcidev == peer) { /* Paranoia checks */ if (ioc->alt_ioc != NULL) { - printk(MYIOC_s_WARN_FMT - "Oops, already bound (%s <==> %s)!\n", - ioc->name, ioc->name, ioc->alt_ioc->name); + printk(MYIOC_s_WARN_FMT "Oops, already bound to %s!\n", + ioc->name, ioc->alt_ioc->name); break; } else if (ioc_srch->alt_ioc != NULL) { - printk(MYIOC_s_WARN_FMT - "Oops, already bound (%s <==> %s)!\n", - ioc_srch->name, ioc_srch->name, - ioc_srch->alt_ioc->name); + printk(MYIOC_s_WARN_FMT "Oops, already bound to %s!\n", + ioc_srch->name, ioc_srch->alt_ioc->name); break; } - dprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "FOUND! binding %s <==> %s\n", - ioc->name, ioc->name, ioc_srch->name)); + dprintk(ioc, printk(MYIOC_s_INFO_FMT "FOUND! binding to %s\n", + ioc->name, ioc_srch->name)); ioc_srch->alt_ioc = ioc; ioc->alt_ioc = ioc_srch; } @@ -2657,8 +2464,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc) int ret; if (ioc->cached_fw != NULL) { - ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: Pushing FW onto adapter\n", __func__, ioc->name)); + ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto " + "adapter\n", __func__, ioc->name)); if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *) ioc->cached_fw, CAN_SLEEP)) < 0) { printk(MYIOC_s_WARN_FMT @@ -2667,30 +2474,11 @@ mpt_adapter_disable(MPT_ADAPTER *ioc) } } - /* - * Put the controller into ready state (if its not already) - */ - if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) { - if (!SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, - CAN_SLEEP)) { - if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) - printk(MYIOC_s_ERR_FMT "%s: IOC msg unit " - "reset failed to put ioc in ready state!\n", - ioc->name, __func__); - } else - printk(MYIOC_s_ERR_FMT "%s: IOC msg unit reset " - "failed!\n", ioc->name, __func__); - } - - /* Disable adapter interrupts! 
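
The LAN address printk at the top of this hunk reads the MAC out of HardwareAddressLow/High, which (judging from the reversed a[5]..a[0] order the driver uses) store it low byte first. A quick standalone check of that formatting:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Bytes as they sit in memory, low byte first. */
        uint8_t a[6] = { 0x56, 0x34, 0x12, 0xEF, 0xCD, 0xAB };

        printf("LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
               a[5], a[4], a[3], a[2], a[1], a[0]);  /* AB:CD:EF:12:34:56 */
        return 0;
    }
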
*/ - synchronize_irq(ioc->pcidev->irq); CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF); ioc->active = 0; - /* Clear any lingering interrupt */ CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); - CHIPREG_READ32(&ioc->chip->IntStatus); if (ioc->alloc != NULL) { sz = ioc->alloc_sz; @@ -2750,22 +2538,19 @@ mpt_adapter_disable(MPT_ADAPTER *ioc) if((ret = mpt_host_page_access_control(ioc, MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) { printk(MYIOC_s_ERR_FMT - ": %s: host page buffers free failed (%d)!\n", - ioc->name, __func__, ret); + "host page buffers free failed (%d)!\n", + ioc->name, ret); } - dexitprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "HostPageBuffer free @ %p, sz=%d bytes\n", - ioc->name, ioc->HostPageBuffer, - ioc->HostPageBuffer_sz)); + dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "HostPageBuffer free @ %p, sz=%d bytes\n", + ioc->name, ioc->HostPageBuffer, ioc->HostPageBuffer_sz)); pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz, ioc->HostPageBuffer, ioc->HostPageBuffer_dma); ioc->HostPageBuffer = NULL; ioc->HostPageBuffer_sz = 0; ioc->alloc_total -= ioc->HostPageBuffer_sz; } - - pci_set_drvdata(ioc->pcidev, NULL); } + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * mpt_adapter_dispose - Free all resources associated with an MPT adapter @@ -2905,12 +2690,8 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag) } /* Is it already READY? */ - if (!statefault && - ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)) { - dinitprintk(ioc, printk(MYIOC_s_INFO_FMT - "IOC is in READY state\n", ioc->name)); + if (!statefault && (ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY) return 0; - } /* * Check to see if IOC is in FAULT state. @@ -2983,9 +2764,8 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag) ii++; cntdn--; if (!cntdn) { - printk(MYIOC_s_ERR_FMT - "Wait IOC_READY state (0x%x) timeout(%d)!\n", - ioc->name, ioc_state, (int)((ii+5)/HZ)); + printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n", + ioc->name, (int)((ii+5)/HZ)); return -ETIME; } @@ -2998,8 +2778,9 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag) } if (statefault < 3) { - printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name, - statefault == 1 ? "stuck handshake" : "IOC FAULT"); + printk(MYIOC_s_INFO_FMT "Recovered from %s\n", + ioc->name, + statefault==1 ? "stuck handshake" : "IOC FAULT"); } return hard_reset_done; @@ -3052,9 +2833,8 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason) /* IOC *must* NOT be in RESET state! */ if (ioc->last_state == MPI_IOC_STATE_RESET) { - printk(KERN_ERR MYNAM - ": ERROR - Can't get IOCFacts, %s NOT READY! (%08x)\n", - ioc->name, ioc->last_state); + printk(MYIOC_s_ERR_FMT "Can't get IOCFacts NOT READY! (%08x)\n", + ioc->name, ioc->last_state ); return -44; } @@ -3116,7 +2896,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason) * Old: u16{Major(4),Minor(4),SubMinor(8)} * New: u32{Major(8),Minor(8),Unit(8),Dev(8)} */ - if (facts->MsgVersion < MPI_VERSION_01_02) { + if (facts->MsgVersion < 0x0102) { /* * Handle old FC f/w style, convert to new... 
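
The conversion hinted at above repacks u16{Major(4),Minor(4),SubMinor(8)} into u32{Major(8),Minor(8),Unit(8),Dev(8)}. A plausible sketch; the placement of SubMinor into the Dev byte is my assumption, not read out of the driver:

    #include <stdint.h>

    /* Old FC firmware: u16 {Major:4, Minor:4, SubMinor:8}
     * New layout:      u32 {Major:8, Minor:8, Unit:8, Dev:8} */
    static uint32_t fw_version_old_to_new(uint16_t old)
    {
        uint32_t major = (old >> 12) & 0x0F;
        uint32_t minor = (old >> 8)  & 0x0F;
        uint32_t sub   =  old        & 0xFF;

        return (major << 24) | (minor << 16) | sub;  /* SubMinor -> Dev */
    }
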
*/ @@ -3128,11 +2908,9 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason) facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word); facts->ProductID = le16_to_cpu(facts->ProductID); - if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK) > MPI_FW_HEADER_PID_PROD_TARGET_SCSI) ioc->ir_firmware = 1; - facts->CurrentHostMfaHighAddr = le32_to_cpu(facts->CurrentHostMfaHighAddr); facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits); @@ -3148,7 +2926,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason) * to 14 in MPI-1.01.0x. */ if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 && - facts->MsgVersion > MPI_VERSION_01_00) { + facts->MsgVersion > 0x0100) { facts->FWImageSize = le32_to_cpu(facts->FWImageSize); } @@ -3330,7 +3108,6 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag) ioc_init.MaxDevices = (U8)ioc->devices_per_bus; ioc_init.MaxBuses = (U8)ioc->number_of_buses; - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n", ioc->name, ioc->facts.MsgVersion)); if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) { @@ -3345,7 +3122,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag) } ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz); /* in BYTES */ - if (ioc->sg_addr_size == sizeof(u64)) { + if (sizeof(dma_addr_t) == sizeof(u64)) { /* Save the upper 32-bits of the request * (reply) and sense buffers. */ @@ -3548,10 +3325,11 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag) FWUpload_t *prequest; FWUploadReply_t *preply; FWUploadTCSGE_t *ptcsge; + int sgeoffset; u32 flagsLength; int ii, sz, reply_sz; int cmdStatus; - int request_size; + /* If the image size is 0, we are done. */ if ((sz = ioc->facts.FWImageSize) == 0) @@ -3586,41 +3364,42 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag) ptcsge->ImageSize = cpu_to_le32(sz); ptcsge++; + sgeoffset = sizeof(FWUpload_t) - sizeof(SGE_MPI_UNION) + sizeof(FWUploadTCSGE_t); + flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz; - ioc->add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma); - request_size = offsetof(FWUpload_t, SGL) + sizeof(FWUploadTCSGE_t) + - ioc->SGE_size; - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending FW Upload " - " (req @ %p) fw_size=%d mf_request_size=%d\n", ioc->name, prequest, - ioc->facts.FWImageSize, request_size)); + mpt_add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma); + + sgeoffset += sizeof(u32) + sizeof(dma_addr_t); + dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": Sending FW Upload (req @ %p) sgeoffset=%d \n", + ioc->name, prequest, sgeoffset)); DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest); - ii = mpt_handshake_req_reply_wait(ioc, request_size, (u32 *)prequest, - reply_sz, (u16 *)preply, 65 /*seconds*/, sleepFlag); + ii = mpt_handshake_req_reply_wait(ioc, sgeoffset, (u32*)prequest, + reply_sz, (u16*)preply, 65 /*seconds*/, sleepFlag); - dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Upload completed " - "rc=%x \n", ioc->name, ii)); + dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Upload completed rc=%x \n", ioc->name, ii)); cmdStatus = -EFAULT; if (ii == 0) { /* Handshake transfer was complete and successful. * Check the Reply Frame. 
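
mpt_do_upload() above hand-computes the handshake request length: the fixed FWUpload_t minus its generic SGE union, plus the transaction-context SGE, plus the simple SGE that mpt_add_sge() appends (a FlagsLength word and an address). With made-up struct sizes for a 32-bit build; the real values come from the MPI headers:

    #include <stdio.h>

    int main(void)
    {
        /* Illustrative sizes only. */
        unsigned sizeof_FWUpload   = 40;     /* includes SGE_MPI_UNION */
        unsigned sizeof_SGE_UNION  = 16;
        unsigned sizeof_TCSGE      = 16;     /* FWUploadTCSGE_t */
        unsigned sizeof_simple_sge = 4 + 4;  /* FlagsLength + 32-bit addr */

        unsigned sgeoffset = sizeof_FWUpload - sizeof_SGE_UNION + sizeof_TCSGE;
        sgeoffset += sizeof_simple_sge;      /* appended by mpt_add_sge() */

        printf("handshake request = %u bytes\n", sgeoffset);
        return 0;
    }
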
*/ - int status; - status = le16_to_cpu(preply->IOCStatus) & - MPI_IOCSTATUS_MASK; - if (status == MPI_IOCSTATUS_SUCCESS && - ioc->facts.FWImageSize == - le32_to_cpu(preply->ActualImageSize)) + int status, transfer_sz; + status = le16_to_cpu(preply->IOCStatus); + if (status == MPI_IOCSTATUS_SUCCESS) { + transfer_sz = le32_to_cpu(preply->ActualImageSize); + if (transfer_sz == sz) cmdStatus = 0; + } } dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n", ioc->name, cmdStatus)); if (cmdStatus) { - ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed, " - "freeing image \n", ioc->name)); + + ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": fw upload failed, freeing image \n", + ioc->name)); mpt_free_fw_memory(ioc); } kfree(prequest); @@ -3944,10 +3723,6 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) CHIPREG_WRITE32(&ioc->chip->IntStatus, 0); if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) { - - if (!ignore) - return 0; - drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset " "address=%p\n", ioc->name, __func__, &ioc->chip->Doorbell, &ioc->chip->Reset_1078)); @@ -3965,7 +3740,6 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) "looking for READY STATE: doorbell=%x" " count=%d\n", ioc->name, doorbell, count)); - if (doorbell == MPI_IOC_STATE_READY) { return 1; } @@ -4116,10 +3890,6 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) doorbell = CHIPREG_READ32(&ioc->chip->Doorbell); doorbell &= MPI_IOC_STATE_MASK; - drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "looking for READY STATE: doorbell=%x" - " count=%d\n", ioc->name, doorbell, count)); - if (doorbell == MPI_IOC_STATE_READY) { break; } @@ -4131,11 +3901,6 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag) mdelay (1000); } } - - if (doorbell != MPI_IOC_STATE_READY) - printk(MYIOC_s_ERR_FMT "Failed to come READY " - "after reset! 
IocState=%x", ioc->name, - doorbell); } } @@ -4254,9 +4019,8 @@ SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag) if (sleepFlag != CAN_SLEEP) count *= 10; - printk(MYIOC_s_ERR_FMT - "Wait IOC_READY state (0x%x) timeout(%d)!\n", - ioc->name, state, (int)((count+5)/HZ)); + printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n", + ioc->name, (int)((count+5)/HZ)); return -ETIME; } @@ -4326,29 +4090,24 @@ initChainBuffers(MPT_ADAPTER *ioc) * num_sge = num sge in request frame + last chain buffer * scale = num sge per chain buffer if no chain element */ - scale = ioc->req_sz / ioc->SGE_size; - if (ioc->sg_addr_size == sizeof(u64)) - num_sge = scale + (ioc->req_sz - 60) / ioc->SGE_size; + scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); + if (sizeof(dma_addr_t) == sizeof(u64)) + num_sge = scale + (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32)); else - num_sge = 1 + scale + (ioc->req_sz - 64) / ioc->SGE_size; + num_sge = 1+ scale + (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32)); - if (ioc->sg_addr_size == sizeof(u64)) { + if (sizeof(dma_addr_t) == sizeof(u64)) { numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + - (ioc->req_sz - 60) / ioc->SGE_size; + (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32)); } else { - numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + - scale + (ioc->req_sz - 64) / ioc->SGE_size; + numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + + (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32)); } dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n", ioc->name, num_sge, numSGE)); - if (ioc->bus_type == FC) { - if (numSGE > MPT_SCSI_FC_SG_DEPTH) - numSGE = MPT_SCSI_FC_SG_DEPTH; - } else { - if (numSGE > MPT_SCSI_SG_DEPTH) - numSGE = MPT_SCSI_SG_DEPTH; - } + if ( numSGE > MPT_SCSI_SG_DEPTH ) + numSGE = MPT_SCSI_SG_DEPTH; num_chain = 1; while (numSGE - num_sge > 0) { @@ -4402,42 +4161,12 @@ PrimeIocFifos(MPT_ADAPTER *ioc) dma_addr_t alloc_dma; u8 *mem; int i, reply_sz, sz, total_size, num_chain; - u64 dma_mask; - - dma_mask = 0; /* Prime reply FIFO... 
*/ if (ioc->reply_frames == NULL) { if ( (num_chain = initChainBuffers(ioc)) < 0) return -1; - /* - * 1078 errata workaround for the 36GB limitation - */ - if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 && - ioc->dma_mask > DMA_35BIT_MASK) { - if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32)) - && !pci_set_consistent_dma_mask(ioc->pcidev, - DMA_BIT_MASK(32))) { - dma_mask = DMA_35BIT_MASK; - d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "setting 35 bit addressing for " - "Request/Reply/Chain and Sense Buffers\n", - ioc->name)); - } else { - /*Reseting DMA mask to 64 bit*/ - pci_set_dma_mask(ioc->pcidev, - DMA_BIT_MASK(64)); - pci_set_consistent_dma_mask(ioc->pcidev, - DMA_BIT_MASK(64)); - - printk(MYIOC_s_ERR_FMT - "failed setting 35 bit addressing for " - "Request/Reply/Chain and Sense Buffers\n", - ioc->name); - return -1; - } - } total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth); dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n", @@ -4576,16 +4305,9 @@ PrimeIocFifos(MPT_ADAPTER *ioc) alloc_dma += ioc->reply_sz; } - if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev, - ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev, - ioc->dma_mask)) - d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "restoring 64 bit addressing\n", ioc->name)); - return 0; out_fail: - if (ioc->alloc != NULL) { sz = ioc->alloc_sz; pci_free_consistent(ioc->pcidev, @@ -4602,13 +4324,6 @@ PrimeIocFifos(MPT_ADAPTER *ioc) ioc->sense_buf_pool, ioc->sense_buf_pool_dma); ioc->sense_buf_pool = NULL; } - - if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev, - DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev, - DMA_BIT_MASK(64))) - d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "restoring 64 bit addressing\n", ioc->name)); - return -1; } @@ -5044,14 +4759,7 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode) SasIoUnitControlReply_t *sasIoUnitCntrReply; MPT_FRAME_HDR *mf = NULL; MPIHeader_t *mpi_hdr; - int ret = 0; - unsigned long timeleft; - - mutex_lock(&ioc->mptbase_cmds.mutex); - /* init the internal cmd struct */ - memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE); - INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status) /* insure garbage is not sent to fw */ switch(persist_opcode) { @@ -5061,19 +4769,17 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode) break; default: - ret = -1; - goto out; + return -1; + break; } - printk(KERN_DEBUG "%s: persist_opcode=%x\n", - __func__, persist_opcode); + printk("%s: persist_opcode=%x\n",__func__, persist_opcode); /* Get a MF for this command. 
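
The block removed from PrimeIocFifos() above worked around an LSI 1078 erratum in the 36 GB region by capping Request/Reply/Chain/Sense buffers at 35-bit DMA. Whether a buffer is reachable under that cap is a single mask test; DMA_35BIT_MASK spelled out by hand:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t mask35 = (1ULL << 35) - 1;  /* DMA_35BIT_MASK */
        uint64_t addr   = 0x900000000ULL;    /* 36 GB, the troublesome window */

        printf("0x%llx %s the 35-bit mask\n",
               (unsigned long long)addr,
               (addr & ~mask35) ? "exceeds" : "fits");
        return 0;
    }
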
*/ if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { - printk(KERN_DEBUG "%s: no msg frames!\n", __func__); - ret = -1; - goto out; + printk("%s: no msg frames!\n",__func__); + return -1; } mpi_hdr = (MPIHeader_t *) mf; @@ -5083,42 +4789,27 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode) sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext; sasIoUnitCntrReq->Operation = persist_opcode; + init_timer(&ioc->persist_timer); + ioc->persist_timer.data = (unsigned long) ioc; + ioc->persist_timer.function = mpt_timer_expired; + ioc->persist_timer.expires = jiffies + HZ*10 /* 10 sec */; + ioc->persist_wait_done=0; + add_timer(&ioc->persist_timer); mpt_put_msg_frame(mpt_base_index, ioc, mf); - timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done, 10*HZ); - if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { - ret = -ETIME; - printk(KERN_DEBUG "%s: failed\n", __func__); - if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) - goto out; - if (!timeleft) { - printk(KERN_DEBUG "%s: Issuing Reset from %s!!\n", - ioc->name, __func__); - mpt_HardResetHandler(ioc, CAN_SLEEP); - mpt_free_msg_frame(ioc, mf); - } - goto out; - } - - if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { - ret = -1; - goto out; - } + wait_event(mpt_waitq, ioc->persist_wait_done); sasIoUnitCntrReply = - (SasIoUnitControlReply_t *)ioc->mptbase_cmds.reply; + (SasIoUnitControlReply_t *)ioc->persist_reply_frame; if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) { - printk(KERN_DEBUG "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n", - __func__, sasIoUnitCntrReply->IOCStatus, + printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n", + __func__, + sasIoUnitCntrReply->IOCStatus, sasIoUnitCntrReply->IOCLogInfo); - printk(KERN_DEBUG "%s: failed\n", __func__); - ret = -1; - } else - printk(KERN_DEBUG "%s: success\n", __func__); - out: + return -1; + } - CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status) - mutex_unlock(&ioc->mptbase_cmds.mutex); - return ret; + printk("%s: success\n",__func__); + return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -5703,20 +5394,17 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id) * -ENOMEM if pci_alloc failed **/ int -mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, - RaidPhysDiskPage0_t *phys_disk) +mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk) { - CONFIGPARMS cfg; - ConfigPageHeader_t hdr; + CONFIGPARMS cfg; + ConfigPageHeader_t hdr; dma_addr_t dma_handle; pRaidPhysDiskPage0_t buffer = NULL; int rc; memset(&cfg, 0 , sizeof(CONFIGPARMS)); memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); - memset(phys_disk, 0, sizeof(RaidPhysDiskPage0_t)); - hdr.PageVersion = MPI_RAIDPHYSDISKPAGE0_PAGEVERSION; hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK; cfg.cfghdr.hdr = &hdr; cfg.physAddr = -1; @@ -5762,161 +5450,6 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, return rc; } -/** - * mpt_raid_phys_disk_get_num_paths - returns number paths associated to this phys_num - * @ioc: Pointer to a Adapter Structure - * @phys_disk_num: io unit unique phys disk num generated by the ioc - * - * Return: - * returns number paths - **/ -int -mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num) -{ - CONFIGPARMS cfg; - ConfigPageHeader_t hdr; - dma_addr_t dma_handle; - pRaidPhysDiskPage1_t buffer = NULL; - int rc; - - memset(&cfg, 0 , sizeof(CONFIGPARMS)); - memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); - - hdr.PageVersion = 
MPI_RAIDPHYSDISKPAGE1_PAGEVERSION; - hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK; - hdr.PageNumber = 1; - cfg.cfghdr.hdr = &hdr; - cfg.physAddr = -1; - cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; - - if (mpt_config(ioc, &cfg) != 0) { - rc = 0; - goto out; - } - - if (!hdr.PageLength) { - rc = 0; - goto out; - } - - buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, - &dma_handle); - - if (!buffer) { - rc = 0; - goto out; - } - - cfg.physAddr = dma_handle; - cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; - cfg.pageAddr = phys_disk_num; - - if (mpt_config(ioc, &cfg) != 0) { - rc = 0; - goto out; - } - - rc = buffer->NumPhysDiskPaths; - out: - - if (buffer) - pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer, - dma_handle); - - return rc; -} -EXPORT_SYMBOL(mpt_raid_phys_disk_get_num_paths); - -/** - * mpt_raid_phys_disk_pg1 - returns phys disk page 1 - * @ioc: Pointer to a Adapter Structure - * @phys_disk_num: io unit unique phys disk num generated by the ioc - * @phys_disk: requested payload data returned - * - * Return: - * 0 on success - * -EFAULT if read of config page header fails or data pointer not NULL - * -ENOMEM if pci_alloc failed - **/ -int -mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num, - RaidPhysDiskPage1_t *phys_disk) -{ - CONFIGPARMS cfg; - ConfigPageHeader_t hdr; - dma_addr_t dma_handle; - pRaidPhysDiskPage1_t buffer = NULL; - int rc; - int i; - __le64 sas_address; - - memset(&cfg, 0 , sizeof(CONFIGPARMS)); - memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); - rc = 0; - - hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION; - hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK; - hdr.PageNumber = 1; - cfg.cfghdr.hdr = &hdr; - cfg.physAddr = -1; - cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; - - if (mpt_config(ioc, &cfg) != 0) { - rc = -EFAULT; - goto out; - } - - if (!hdr.PageLength) { - rc = -EFAULT; - goto out; - } - - buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, - &dma_handle); - - if (!buffer) { - rc = -ENOMEM; - goto out; - } - - cfg.physAddr = dma_handle; - cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; - cfg.pageAddr = phys_disk_num; - - if (mpt_config(ioc, &cfg) != 0) { - rc = -EFAULT; - goto out; - } - - phys_disk->NumPhysDiskPaths = buffer->NumPhysDiskPaths; - phys_disk->PhysDiskNum = phys_disk_num; - for (i = 0; i < phys_disk->NumPhysDiskPaths; i++) { - phys_disk->Path[i].PhysDiskID = buffer->Path[i].PhysDiskID; - phys_disk->Path[i].PhysDiskBus = buffer->Path[i].PhysDiskBus; - phys_disk->Path[i].OwnerIdentifier = - buffer->Path[i].OwnerIdentifier; - phys_disk->Path[i].Flags = le16_to_cpu(buffer->Path[i].Flags); - memcpy(&sas_address, &buffer->Path[i].WWID, sizeof(__le64)); - sas_address = le64_to_cpu(sas_address); - memcpy(&phys_disk->Path[i].WWID, &sas_address, sizeof(__le64)); - memcpy(&sas_address, - &buffer->Path[i].OwnerWWID, sizeof(__le64)); - sas_address = le64_to_cpu(sas_address); - memcpy(&phys_disk->Path[i].OwnerWWID, - &sas_address, sizeof(__le64)); - } - - out: - - if (buffer) - pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer, - dma_handle); - - return rc; -} -EXPORT_SYMBOL(mpt_raid_phys_disk_pg1); - - /** * mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes * @ioc: Pointer to a Adapter Strucutre @@ -6242,28 +5775,30 @@ mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc) * SendEventNotification - Send EventNotification (on or off) request to adapter * @ioc: Pointer to MPT_ADAPTER structure * @EvSwitch: Event switch flags - * @sleepFlag: Specifies whether the process can sleep */ 
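
Both helpers removed above follow mptbase's standard two-step config page access: a PAGE_HEADER action to learn the page length in dwords, a DMA-able allocation of that size, then PAGE_READ_CURRENT into it. The flow with the PCI plumbing faked out (names and the 12-dword length are stand-ins):

    #include <stdlib.h>
    #include <string.h>

    enum { ACTION_PAGE_HEADER, ACTION_PAGE_READ_CURRENT };

    struct hdr { unsigned page_length; };  /* length in 32-bit dwords */

    static int fake_mpt_config(struct hdr *h, int action, void *buf)
    {
        if (action == ACTION_PAGE_HEADER)
            h->page_length = 12;           /* pretend the IOC said 12 dwords */
        else
            memset(buf, 0, h->page_length * 4);
        return 0;
    }

    static void *read_config_page(struct hdr *h)
    {
        void *buf;
        if (fake_mpt_config(h, ACTION_PAGE_HEADER, NULL) || !h->page_length)
            return NULL;
        buf = malloc(h->page_length * 4);  /* pci_alloc_consistent() for real */
        if (buf)
            fake_mpt_config(h, ACTION_PAGE_READ_CURRENT, buf);
        return buf;                        /* caller frees */
    }
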
static int -SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, int sleepFlag) +SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch) { - EventNotification_t evn; - MPIDefaultReply_t reply_buf; + EventNotification_t *evnp; - memset(&evn, 0, sizeof(EventNotification_t)); - memset(&reply_buf, 0, sizeof(MPIDefaultReply_t)); + evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc); + if (evnp == NULL) { + devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n", + ioc->name)); + return 0; + } + memset(evnp, 0, sizeof(*evnp)); - evn.Function = MPI_FUNCTION_EVENT_NOTIFICATION; - evn.Switch = EvSwitch; - evn.MsgContext = cpu_to_le32(mpt_base_index << 16); + devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending EventNotification (%d) request %p\n", ioc->name, EvSwitch, evnp)); - devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "Sending EventNotification (%d) request %p\n", - ioc->name, EvSwitch, &evn)); + evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION; + evnp->ChainOffset = 0; + evnp->MsgFlags = 0; + evnp->Switch = EvSwitch; + + mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)evnp); - return mpt_handshake_req_reply_wait(ioc, sizeof(EventNotification_t), - (u32 *)&evn, sizeof(MPIDefaultReply_t), (u16 *)&reply_buf, 30, - sleepFlag); + return 0; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -6279,7 +5814,7 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp) if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", - ioc->name, __func__)); + ioc->name,__func__)); return -1; } @@ -6316,19 +5851,12 @@ int mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) { Config_t *pReq; - ConfigReply_t *pReply; ConfigExtendedPageHeader_t *pExtHdr = NULL; MPT_FRAME_HDR *mf; - int ii; - int flagsLength; - long timeout; - int ret; - u8 page_type = 0, extend_page; - unsigned long timeleft; unsigned long flags; - int in_isr; - u8 issue_hard_reset = 0; - u8 retry_count = 0; + int ii, rc; + int flagsLength; + int in_isr; /* Prevent calling wait_event() (below), if caller happens * to be in ISR context, because that is fatal! 
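
The MsgContext built in the removed handshake variant above, mpt_base_index << 16, is how replies find their way home: the registered callback index rides in the context's upper bits and the reply path uses it to route the frame. Encode/decode with that bit layout; the token half is my own addition for illustration:

    #include <stdint.h>

    static uint32_t make_msg_context(uint8_t cb_idx, uint16_t token)
    {
        return ((uint32_t)cb_idx << 16) | token;
    }

    /* The reply path recovers the callback index to pick the handler. */
    static uint8_t cb_idx_of(uint32_t msg_context)
    {
        return (uint8_t)(msg_context >> 16);
    }
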
@@ -6338,43 +5866,15 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n", ioc->name)); return -EPERM; - } - - /* don't send a config page during diag reset */ - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); - if (ioc->ioc_reset_in_progress) { - dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: busy with host reset\n", ioc->name, __func__)); - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); - return -EBUSY; - } - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); - - /* don't send if no chance of success */ - if (!ioc->active || - mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_OPERATIONAL) { - dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: ioc not operational, %d, %xh\n", - ioc->name, __func__, ioc->active, - mpt_GetIocState(ioc, 0))); - return -EFAULT; } - retry_config: - mutex_lock(&ioc->mptbase_cmds.mutex); - /* init the internal cmd struct */ - memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE); - INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status) - /* Get and Populate a free Frame */ if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) { - dcprintk(ioc, printk(MYIOC_s_WARN_FMT - "mpt_config: no msg frames!\n", ioc->name)); - ret = -EAGAIN; - goto out; + dcprintk(ioc, printk(MYIOC_s_WARN_FMT "mpt_config: no msg frames!\n", + ioc->name)); + return -EAGAIN; } - pReq = (Config_t *)mf; pReq->Action = pCfg->action; pReq->Reserved = 0; @@ -6400,9 +5900,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) pReq->ExtPageType = pExtHdr->ExtPageType; pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; - /* Page Length must be treated as a reserved field for the - * extended header. - */ + /* Page Length must be treated as a reserved field for the extended header. */ pReq->Header.PageLength = 0; } @@ -6415,91 +5913,78 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) else flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; - if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == - MPI_CONFIG_PAGETYPE_EXTENDED) { + if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) { flagsLength |= pExtHdr->ExtPageLength * 4; - page_type = pReq->ExtPageType; - extend_page = 1; - } else { + + dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Config request type %d, page %d and action %d\n", + ioc->name, pReq->ExtPageType, pReq->Header.PageNumber, pReq->Action)); + } + else { flagsLength |= pCfg->cfghdr.hdr->PageLength * 4; - page_type = pReq->Header.PageType; - extend_page = 0; + + dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Config request type %d, page %d and action %d\n", + ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action)); } - dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "Sending Config request type 0x%x, page 0x%x and action %d\n", - ioc->name, page_type, pReq->Header.PageNumber, pReq->Action)); + mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr); + + /* Append pCfg pointer to end of mf + */ + *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) = (void *) pCfg; - ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr); - timeout = (pCfg->timeout < 15) ? 
HZ*15 : HZ*pCfg->timeout; + /* Initalize the timer + */ + init_timer_on_stack(&pCfg->timer); + pCfg->timer.data = (unsigned long) ioc; + pCfg->timer.function = mpt_timer_expired; + pCfg->wait_done = 0; + + /* Set the timer; ensure 10 second minimum */ + if (pCfg->timeout < 10) + pCfg->timer.expires = jiffies + HZ*10; + else + pCfg->timer.expires = jiffies + HZ*pCfg->timeout; + + /* Add to end of Q, set timer and then issue this command */ + spin_lock_irqsave(&ioc->FreeQlock, flags); + list_add_tail(&pCfg->linkage, &ioc->configQ); + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + + add_timer(&pCfg->timer); mpt_put_msg_frame(mpt_base_index, ioc, mf); - timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done, - timeout); - if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { - ret = -ETIME; - dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "Failed Sending Config request type 0x%x, page 0x%x," - " action %d, status %xh, time left %ld\n\n", - ioc->name, page_type, pReq->Header.PageNumber, - pReq->Action, ioc->mptbase_cmds.status, timeleft)); - if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) - goto out; - if (!timeleft) - issue_hard_reset = 1; - goto out; - } + wait_event(mpt_waitq, pCfg->wait_done); - if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { - ret = -1; - goto out; - } - pReply = (ConfigReply_t *)ioc->mptbase_cmds.reply; - ret = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK; - if (ret == MPI_IOCSTATUS_SUCCESS) { - if (extend_page) { - pCfg->cfghdr.ehdr->ExtPageLength = - le16_to_cpu(pReply->ExtPageLength); - pCfg->cfghdr.ehdr->ExtPageType = - pReply->ExtPageType; - } - pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion; - pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength; - pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber; - pCfg->cfghdr.hdr->PageType = pReply->Header.PageType; + /* mf has been freed - do not access */ - } + rc = pCfg->status; + + return rc; +} - if (retry_count) - printk(MYIOC_s_INFO_FMT "Retry completed " - "ret=0x%x timeleft=%ld\n", - ioc->name, ret, timeleft); +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mpt_timer_expired - Callback for timer process. + * Used only internal config functionality. + * @data: Pointer to MPT_SCSI_HOST recast as an unsigned long + */ +static void +mpt_timer_expired(unsigned long data) +{ + MPT_ADAPTER *ioc = (MPT_ADAPTER *) data; - dcprintk(ioc, printk(KERN_DEBUG "IOCStatus=%04xh, IOCLogInfo=%08xh\n", - ret, le32_to_cpu(pReply->IOCLogInfo))); + dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_timer_expired! \n", ioc->name)); -out: + /* Perform a FW reload */ + if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0) + printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name); - CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status) - mutex_unlock(&ioc->mptbase_cmds.mutex); - if (issue_hard_reset) { - issue_hard_reset = 0; - printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", - ioc->name, __func__); - mpt_HardResetHandler(ioc, CAN_SLEEP); - mpt_free_msg_frame(ioc, mf); - /* attempt one retry for a timed out command */ - if (!retry_count) { - printk(MYIOC_s_INFO_FMT - "Attempting Retry Config request" - " type 0x%x, page 0x%x," - " action %d\n", ioc->name, page_type, - pCfg->cfghdr.hdr->PageNumber, pCfg->action); - retry_count++; - goto retry_config; - } - } - return ret; + /* No more processing. + * Hard reset clean-up will wake up + * process and free all resources. 
+ */ + dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_timer_expired complete!\n", ioc->name)); + return; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -6513,34 +5998,41 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg) static int mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) { - switch (reset_phase) { - case MPT_IOC_SETUP_RESET: - ioc->taskmgmt_quiesce_io = 1; - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__)); - break; - case MPT_IOC_PRE_RESET: - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__)); - break; - case MPT_IOC_POST_RESET: - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__)); -/* wake up mptbase_cmds */ - if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) { - ioc->mptbase_cmds.status |= - MPT_MGMT_STATUS_DID_IOCRESET; - complete(&ioc->mptbase_cmds.done); - } -/* wake up taskmgmt_cmds */ - if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) { - ioc->taskmgmt_cmds.status |= - MPT_MGMT_STATUS_DID_IOCRESET; - complete(&ioc->taskmgmt_cmds.done); + CONFIGPARMS *pCfg; + unsigned long flags; + + dprintk(ioc, printk(MYIOC_s_DEBUG_FMT + ": IOC %s_reset routed to MPT base driver!\n", + ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : ( + reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post"))); + + if (reset_phase == MPT_IOC_SETUP_RESET) { + ; + } else if (reset_phase == MPT_IOC_PRE_RESET) { + /* If the internal config Q is not empty - + * delete timer. MF resources will be freed when + * the FIFO's are primed. + */ + spin_lock_irqsave(&ioc->FreeQlock, flags); + list_for_each_entry(pCfg, &ioc->configQ, linkage) + del_timer(&pCfg->timer); + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + + } else { + CONFIGPARMS *pNext; + + /* Search the configQ for internal commands. + * Flush the Q, and wake up all suspended threads. + */ + spin_lock_irqsave(&ioc->FreeQlock, flags); + list_for_each_entry_safe(pCfg, pNext, &ioc->configQ, linkage) { + list_del(&pCfg->linkage); + + pCfg->status = MPT_CONFIG_ERROR; + pCfg->wait_done = 1; + wake_up(&mpt_waitq); } - break; - default: - break; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); } return 1; /* currently means nothing really */ @@ -6852,59 +6344,6 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh *size = y; } -/** - * mpt_set_taskmgmt_in_progress_flag - set flags associated with task managment - * @ioc: Pointer to MPT_ADAPTER structure - * - * Returns 0 for SUCCESS or -1 if FAILED. 
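
The hunks above reinstate the older mpt_config() scheme: the request is queued on configQ, a per-request timer is armed with a 10 second floor, and the caller sleeps until wait_done flips or mpt_timer_expired() forces a hard reset. A userspace analogue using pthreads in place of wait queues and kernel timers, with all names invented:

/* Sketch only: pthread primitives stand in for wait_event()/timers. */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int wait_done, status;

static void *reply_path(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    status = 0;                /* reply arrived with good status */
    wait_done = 1;
    pthread_cond_signal(&done);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;
    struct timespec deadline;

    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += 10;     /* the driver enforces a 10 second minimum */

    pthread_create(&t, NULL, reply_path, NULL);
    pthread_mutex_lock(&lock);
    while (!wait_done)
        if (pthread_cond_timedwait(&done, &lock, &deadline))
            break;             /* timeout: the driver would hard-reset here */
    pthread_mutex_unlock(&lock);
    pthread_join(t, NULL);
    printf("status=%d wait_done=%d\n", status, wait_done);
    return 0;
}
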
- * - * If -1 is return, then it was not possible to set the flags - **/ -int -mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc) -{ - unsigned long flags; - int retval; - - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); - if (ioc->ioc_reset_in_progress || ioc->taskmgmt_in_progress || - (ioc->alt_ioc && ioc->alt_ioc->taskmgmt_in_progress)) { - retval = -1; - goto out; - } - retval = 0; - ioc->taskmgmt_in_progress = 1; - ioc->taskmgmt_quiesce_io = 1; - if (ioc->alt_ioc) { - ioc->alt_ioc->taskmgmt_in_progress = 1; - ioc->alt_ioc->taskmgmt_quiesce_io = 1; - } - out: - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); - return retval; -} -EXPORT_SYMBOL(mpt_set_taskmgmt_in_progress_flag); - -/** - * mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task managment - * @ioc: Pointer to MPT_ADAPTER structure - * - **/ -void -mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc) -{ - unsigned long flags; - - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); - ioc->taskmgmt_in_progress = 0; - ioc->taskmgmt_quiesce_io = 0; - if (ioc->alt_ioc) { - ioc->alt_ioc->taskmgmt_in_progress = 0; - ioc->alt_ioc->taskmgmt_quiesce_io = 0; - } - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); -} -EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag); /** @@ -6958,9 +6397,7 @@ int mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) { int rc; - u8 cb_idx; unsigned long flags; - unsigned long time_count; dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name)); #ifdef MFCNT @@ -6973,15 +6410,14 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) /* Reset the adapter. Prevent more than 1 call to * mpt_do_ioc_recovery at any instant in time. */ - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); - if (ioc->ioc_reset_in_progress) { - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + spin_lock_irqsave(&ioc->diagLock, flags); + if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)){ + spin_unlock_irqrestore(&ioc->diagLock, flags); return 0; + } else { + ioc->diagPending = 1; } - ioc->ioc_reset_in_progress = 1; - if (ioc->alt_ioc) - ioc->alt_ioc->ioc_reset_in_progress = 1; - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + spin_unlock_irqrestore(&ioc->diagLock, flags); /* FIXME: If do_ioc_recovery fails, repeat.... */ @@ -6991,57 +6427,47 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag) * Prevents timeouts occurring during a diagnostic reset...very bad. * For all other protocol drivers, this is a no-op. 
*/ - for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { - if (MptResetHandlers[cb_idx]) { - mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET); - if (ioc->alt_ioc) - mpt_signal_reset(cb_idx, ioc->alt_ioc, - MPT_IOC_SETUP_RESET); + { + u8 cb_idx; + int r = 0; + + for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { + if (MptResetHandlers[cb_idx]) { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling IOC reset_setup handler #%d\n", + ioc->name, cb_idx)); + r += mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET); + if (ioc->alt_ioc) { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling alt-%s setup reset handler #%d\n", + ioc->name, ioc->alt_ioc->name, cb_idx)); + r += mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_SETUP_RESET); + } + } } } - time_count = jiffies; - rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag); - if (rc != 0) { - printk(KERN_WARNING MYNAM - ": WARNING - (%d) Cannot recover %s\n", rc, ioc->name); - } else { - if (ioc->hard_resets < -1) - ioc->hard_resets++; + if ((rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag)) != 0) { + printk(MYIOC_s_WARN_FMT "Cannot recover rc = %d!\n", ioc->name, rc); } + ioc->reload_fw = 0; + if (ioc->alt_ioc) + ioc->alt_ioc->reload_fw = 0; - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); - ioc->ioc_reset_in_progress = 0; - ioc->taskmgmt_quiesce_io = 0; - ioc->taskmgmt_in_progress = 0; - if (ioc->alt_ioc) { - ioc->alt_ioc->ioc_reset_in_progress = 0; - ioc->alt_ioc->taskmgmt_quiesce_io = 0; - ioc->alt_ioc->taskmgmt_in_progress = 0; - } - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + spin_lock_irqsave(&ioc->diagLock, flags); + ioc->diagPending = 0; + if (ioc->alt_ioc) + ioc->alt_ioc->diagPending = 0; + spin_unlock_irqrestore(&ioc->diagLock, flags); - dtmprintk(ioc, - printk(MYIOC_s_DEBUG_FMT - "HardResetHandler: completed (%d seconds): %s\n", ioc->name, - jiffies_to_msecs(jiffies - time_count)/1000, ((rc == 0) ? - "SUCCESS" : "FAILED"))); + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler rc = %d!\n", ioc->name, rc)); return rc; } -#ifdef CONFIG_FUSION_LOGGING +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static void -mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply) +EventDescriptionStr(u8 event, u32 evData0, char *evStr) { char *ds = NULL; - u32 evData0; - int ii; - u8 event; - char *evStr = ioc->evStr; - - event = le32_to_cpu(pEventReply->Event) & 0xFF; - evData0 = le32_to_cpu(pEventReply->Data[0]); switch(event) { case MPI_EVENT_NONE: @@ -7075,9 +6501,9 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply) if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP) ds = "Loop State(LIP) Change"; else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE) - ds = "Loop State(LPE) Change"; + ds = "Loop State(LPE) Change"; /* ??? */ else - ds = "Loop State(LPB) Change"; + ds = "Loop State(LPB) Change"; /* ??? 
*/ break; case MPI_EVENT_LOGOUT: ds = "Logout"; @@ -7277,65 +6703,28 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply) } case MPI_EVENT_IR2: { - u8 id = (u8)(evData0); - u8 channel = (u8)(evData0 >> 8); - u8 phys_num = (u8)(evData0 >> 24); u8 ReasonCode = (u8)(evData0 >> 16); - switch (ReasonCode) { case MPI_EVENT_IR2_RC_LD_STATE_CHANGED: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "IR2: LD State Changed: " - "id=%d channel=%d phys_num=%d", - id, channel, phys_num); + ds = "IR2: LD State Changed"; break; case MPI_EVENT_IR2_RC_PD_STATE_CHANGED: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "IR2: PD State Changed " - "id=%d channel=%d phys_num=%d", - id, channel, phys_num); + ds = "IR2: PD State Changed"; break; case MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "IR2: Bad Block Table Full: " - "id=%d channel=%d phys_num=%d", - id, channel, phys_num); + ds = "IR2: Bad Block Table Full"; break; case MPI_EVENT_IR2_RC_PD_INSERTED: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "IR2: PD Inserted: " - "id=%d channel=%d phys_num=%d", - id, channel, phys_num); + ds = "IR2: PD Inserted"; break; case MPI_EVENT_IR2_RC_PD_REMOVED: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "IR2: PD Removed: " - "id=%d channel=%d phys_num=%d", - id, channel, phys_num); + ds = "IR2: PD Removed"; break; case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "IR2: Foreign CFG Detected: " - "id=%d channel=%d phys_num=%d", - id, channel, phys_num); + ds = "IR2: Foreign CFG Detected"; break; case MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "IR2: Rebuild Medium Error: " - "id=%d channel=%d phys_num=%d", - id, channel, phys_num); - break; - case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "IR2: Dual Port Added: " - "id=%d channel=%d phys_num=%d", - id, channel, phys_num); - break; - case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED: - snprintf(evStr, EVENT_DESCR_STR_SZ, - "IR2: Dual Port Removed: " - "id=%d channel=%d phys_num=%d", - id, channel, phys_num); + ds = "IR2: Rebuild Medium Error"; break; default: ds = "IR2"; @@ -7371,18 +6760,13 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply) case MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: { u8 reason = (u8)(evData0); + u8 port_num = (u8)(evData0 >> 8); + u16 handle = le16_to_cpu(evData0 >> 16); - switch (reason) { - case MPI_EVENT_SAS_INIT_RC_ADDED: - ds = "SAS Initiator Status Change: Added"; - break; - case MPI_EVENT_SAS_INIT_RC_REMOVED: - ds = "SAS Initiator Status Change: Deleted"; - break; - default: - ds = "SAS Initiator Status Change"; - break; - } + snprintf(evStr, EVENT_DESCR_STR_SZ, + "SAS Initiator Device Status Change: reason=0x%02x " + "port=%d handle=0x%04x", + reason, port_num, handle); break; } @@ -7430,24 +6814,6 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply) break; } - case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE: - { - u8 reason = (u8)(evData0); - - switch (reason) { - case MPI_EVENT_SAS_EXP_RC_ADDED: - ds = "Expander Status Change: Added"; - break; - case MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING: - ds = "Expander Status Change: Deleted"; - break; - default: - ds = "Expander Status Change"; - break; - } - break; - } - /* * MPT base "custom" events may be added here... 
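
EventDescriptionStr(), as restored above, reduces an event payload to a fixed description string keyed on a sub-code pulled out of evData0. A compact sketch of that decode, using placeholder codes rather than the real MPI_EVENT_IR2_RC_* values:

/* Sketch only: reason codes 1 and 2 are placeholders for illustration. */
#include <stdio.h>
#include <string.h>

#define EVENT_DESCR_STR_SZ 100

static void ir2_descr(unsigned char reason, char *buf)
{
    const char *ds;

    switch (reason) {
    case 1:  ds = "IR2: LD State Changed";  break;
    case 2:  ds = "IR2: PD State Changed";  break;
    default: ds = "IR2";                    break;
    }
    strncpy(buf, ds, EVENT_DESCR_STR_SZ - 1);
    buf[EVENT_DESCR_STR_SZ - 1] = '\0';
}

int main(void)
{
    char evStr[EVENT_DESCR_STR_SZ];
    unsigned int evData0 = 0x00020000;     /* reason code lives in bits 23:16 */

    ir2_descr((unsigned char)(evData0 >> 16), evStr);
    printf("%s\n", evStr);
    return 0;
}
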
*/ @@ -7457,20 +6823,8 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply) } if (ds) strncpy(evStr, ds, EVENT_DESCR_STR_SZ); - - - devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "MPT event:(%02Xh) : %s\n", - ioc->name, event, evStr)); - - devtverboseprintk(ioc, printk(KERN_DEBUG MYNAM - ": Event data:\n")); - for (ii = 0; ii < le16_to_cpu(pEventReply->EventDataLength); ii++) - devtverboseprintk(ioc, printk(" %08x", - le32_to_cpu(pEventReply->Data[ii]))); - devtverboseprintk(ioc, printk(KERN_DEBUG "\n")); } -#endif + /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** * ProcessEventNotification - Route EventNotificationReply to all event handlers @@ -7487,24 +6841,37 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply { u16 evDataLen; u32 evData0 = 0; +// u32 evCtx; int ii; u8 cb_idx; int r = 0; int handlers = 0; + char evStr[EVENT_DESCR_STR_SZ]; u8 event; /* * Do platform normalization of values */ event = le32_to_cpu(pEventReply->Event) & 0xFF; +// evCtx = le32_to_cpu(pEventReply->EventContext); evDataLen = le16_to_cpu(pEventReply->EventDataLength); if (evDataLen) { evData0 = le32_to_cpu(pEventReply->Data[0]); } + EventDescriptionStr(event, evData0, evStr); + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event:(%02Xh) : %s\n", + ioc->name, + event, + evStr)); + #ifdef CONFIG_FUSION_LOGGING - if (evDataLen) - mpt_display_event_info(ioc, pEventReply); + devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT + ": Event data:\n", ioc->name)); + for (ii = 0; ii < evDataLen; ii++) + devtverboseprintk(ioc, printk(" %08x", + le32_to_cpu(pEventReply->Data[ii]))); + devtverboseprintk(ioc, printk("\n")); #endif /* @@ -7559,9 +6926,8 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply */ for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) { if (MptEvHandlers[cb_idx]) { - devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "Routing Event to event handler #%d\n", - ioc->name, cb_idx)); + devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Routing Event to event handler #%d\n", + ioc->name, cb_idx)); r += (*(MptEvHandlers[cb_idx]))(ioc, pEventReply); handlers++; } @@ -7645,6 +7011,8 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info) switch (info) { case 0x00010000: desc = "bug! MID not found"; + if (ioc->reload_fw == 0) + ioc->reload_fw++; break; case 0x00020000: @@ -8245,6 +7613,7 @@ EXPORT_SYMBOL(mpt_get_msg_frame); EXPORT_SYMBOL(mpt_put_msg_frame); EXPORT_SYMBOL(mpt_put_msg_frame_hi_pri); EXPORT_SYMBOL(mpt_free_msg_frame); +EXPORT_SYMBOL(mpt_add_sge); EXPORT_SYMBOL(mpt_send_handshake_request); EXPORT_SYMBOL(mpt_verify_adapter); EXPORT_SYMBOL(mpt_GetIocState); @@ -8281,7 +7650,7 @@ fusion_init(void) /* Register ourselves (mptbase) in order to facilitate * EventNotification handling. */ - mpt_base_index = mpt_register(mptbase_reply, MPTBASE_DRIVER); + mpt_base_index = mpt_register(mpt_base_reply, MPTBASE_DRIVER); /* Register for hard reset handling callbacks. 
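
ProcessEventNotification() walks the MptEvHandlers[] table from the top index down and tallies how many registered drivers consumed the event. A self-contained sketch of that fan-out, with an invented table size and event value:

/* Sketch only: the real table holds per-protocol-driver callbacks. */
#include <stdio.h>

#define MAX_DRIVERS 8

typedef int (*ev_handler_t)(unsigned event);
static ev_handler_t handlers[MAX_DRIVERS];

static int scsi_ev(unsigned event)
{
    printf("scsi handler saw event %u\n", event);
    return 1;
}

static int route_event(unsigned event)
{
    int handled = 0;

    /* index 0 is reserved, hence the cb_idx-- loop shape in the driver */
    for (int cb_idx = MAX_DRIVERS - 1; cb_idx; cb_idx--)
        if (handlers[cb_idx])
            handled += handlers[cb_idx](event);
    return handled;
}

int main(void)
{
    handlers[3] = scsi_ev;
    printf("%d handler(s) ran\n", route_event(42));
    return 0;
}
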
*/ diff --git a/trunk/drivers/message/fusion/mptbase.h b/trunk/drivers/message/fusion/mptbase.h index 1c8514dc31ca..b3e981d2a506 100644 --- a/trunk/drivers/message/fusion/mptbase.h +++ b/trunk/drivers/message/fusion/mptbase.h @@ -76,8 +76,8 @@ #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR #endif -#define MPT_LINUX_VERSION_COMMON "3.04.10" -#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.09" +#define MPT_LINUX_VERSION_COMMON "3.04.07" +#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.07" #define WHAT_MAGIC_STRING "@" "(" "#" ")" #define show_mptmod_ver(s,ver) \ @@ -104,7 +104,6 @@ #endif #define MPT_NAME_LENGTH 32 -#define MPT_KOBJ_NAME_LEN 20 #define MPT_PROCFS_MPTBASEDIR "mpt" /* chg it to "driver/fusion" ? */ @@ -135,7 +134,6 @@ #define MPT_COALESCING_TIMEOUT 0x10 - /* * SCSI transfer rate defines. */ @@ -163,10 +161,10 @@ /* * Set the MAX_SGE value based on user input. */ -#ifdef CONFIG_FUSION_MAX_SGE -#if CONFIG_FUSION_MAX_SGE < 16 +#ifdef CONFIG_FUSION_MAX_SGE +#if CONFIG_FUSION_MAX_SGE < 16 #define MPT_SCSI_SG_DEPTH 16 -#elif CONFIG_FUSION_MAX_SGE > 128 +#elif CONFIG_FUSION_MAX_SGE > 128 #define MPT_SCSI_SG_DEPTH 128 #else #define MPT_SCSI_SG_DEPTH CONFIG_FUSION_MAX_SGE @@ -175,18 +173,6 @@ #define MPT_SCSI_SG_DEPTH 40 #endif -#ifdef CONFIG_FUSION_MAX_FC_SGE -#if CONFIG_FUSION_MAX_FC_SGE < 16 -#define MPT_SCSI_FC_SG_DEPTH 16 -#elif CONFIG_FUSION_MAX_FC_SGE > 256 -#define MPT_SCSI_FC_SG_DEPTH 256 -#else -#define MPT_SCSI_FC_SG_DEPTH CONFIG_FUSION_MAX_FC_SGE -#endif -#else -#define MPT_SCSI_FC_SG_DEPTH 40 -#endif - /* debug print string length used for events and iocstatus */ # define EVENT_DESCR_STR_SZ 100 @@ -445,36 +431,38 @@ do { \ * IOCTL structure and associated defines */ +#define MPT_IOCTL_STATUS_DID_IOCRESET 0x01 /* IOC Reset occurred on the current*/ +#define MPT_IOCTL_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */ +#define MPT_IOCTL_STATUS_TIMER_ACTIVE 0x04 /* The timer is running */ +#define MPT_IOCTL_STATUS_SENSE_VALID 0x08 /* Sense data is valid */ +#define MPT_IOCTL_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */ +#define MPT_IOCTL_STATUS_TMTIMER_ACTIVE 0x20 /* The TM timer is running */ +#define MPT_IOCTL_STATUS_TM_FAILED 0x40 /* User TM request failed */ + #define MPTCTL_RESET_OK 0x01 /* Issue Bus Reset */ -#define MPT_MGMT_STATUS_RF_VALID 0x01 /* The Reply Frame is VALID */ -#define MPT_MGMT_STATUS_COMMAND_GOOD 0x02 /* Command Status GOOD */ -#define MPT_MGMT_STATUS_PENDING 0x04 /* command is pending */ -#define MPT_MGMT_STATUS_DID_IOCRESET 0x08 /* IOC Reset occurred - on the current*/ -#define MPT_MGMT_STATUS_SENSE_VALID 0x10 /* valid sense info */ -#define MPT_MGMT_STATUS_TIMER_ACTIVE 0x20 /* obsolete */ -#define MPT_MGMT_STATUS_FREE_MF 0x40 /* free the mf from - complete routine */ - -#define INITIALIZE_MGMT_STATUS(status) \ - status = MPT_MGMT_STATUS_PENDING; -#define CLEAR_MGMT_STATUS(status) \ - status = 0; -#define CLEAR_MGMT_PENDING_STATUS(status) \ - status &= ~MPT_MGMT_STATUS_PENDING; -#define SET_MGMT_MSG_CONTEXT(msg_context, value) \ - msg_context = value; - -typedef struct _MPT_MGMT { +typedef struct _MPT_IOCTL { + struct _MPT_ADAPTER *ioc; + u8 ReplyFrame[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */ + u8 sense[MPT_SENSE_BUFFER_ALLOC]; + int wait_done; /* wake-up value for this ioc */ + u8 rsvd; + u8 status; /* current command status */ + u8 reset; /* 1 if bus reset allowed */ + u8 id; /* target for reset */ + struct mutex ioctl_mutex; +} MPT_IOCTL; + +#define MPT_SAS_MGMT_STATUS_RF_VALID 0x02 /* The Reply Frame is VALID */ 
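
The MPT_IOCTL_STATUS_* and MPT_SAS_MGMT_STATUS_* values above are independent bits ORed into one status byte as a command progresses, then tested after completion. A small demo of the convention, with renamed placeholder flags:

/* Sketch only: flag names and values are stand-ins for the driver's. */
#include <stdio.h>

#define ST_DID_IOCRESET 0x01
#define ST_RF_VALID     0x02
#define ST_SENSE_VALID  0x08
#define ST_COMMAND_GOOD 0x10

int main(void)
{
    unsigned char status = 0;

    status |= ST_RF_VALID;                 /* reply frame was copied */
    status |= ST_COMMAND_GOOD;             /* IOCStatus reported success */

    if ((status & (ST_RF_VALID | ST_COMMAND_GOOD)) ==
        (ST_RF_VALID | ST_COMMAND_GOOD))
        printf("reply usable\n");
    if (!(status & ST_SENSE_VALID))
        printf("no sense data\n");
    return 0;
}
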
+#define MPT_SAS_MGMT_STATUS_COMMAND_GOOD 0x10 /* Command Status GOOD */ +#define MPT_SAS_MGMT_STATUS_TM_FAILED 0x40 /* User TM request failed */ + +typedef struct _MPT_SAS_MGMT { struct mutex mutex; struct completion done; u8 reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */ - u8 sense[MPT_SENSE_BUFFER_ALLOC]; u8 status; /* current command status */ - int completion_code; - u32 msg_context; -} MPT_MGMT; +}MPT_SAS_MGMT; /* * Event Structure and define @@ -576,10 +564,6 @@ struct mptfc_rport_info u8 flags; }; -typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr); -typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length, - dma_addr_t dma_addr); - /* * Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS */ @@ -589,10 +573,6 @@ typedef struct _MPT_ADAPTER int pci_irq; /* This irq */ char name[MPT_NAME_LENGTH]; /* "iocN" */ char prod_name[MPT_NAME_LENGTH]; /* "LSIFC9x9" */ -#ifdef CONFIG_FUSION_LOGGING - /* used in mpt_display_event_info */ - char evStr[EVENT_DESCR_STR_SZ]; -#endif char board_name[16]; char board_assembly[16]; char board_tracer[16]; @@ -620,10 +600,6 @@ typedef struct _MPT_ADAPTER int reply_depth; /* Num Allocated reply frames */ int reply_sz; /* Reply frame size */ int num_chain; /* Number of chain buffers */ - MPT_ADD_SGE add_sge; /* Pointer to add_sge - function */ - MPT_ADD_CHAIN add_chain; /* Pointer to add_chain - function */ /* Pool of buffers for chaining. ReqToChain * and ChainToChain track index of chain buffers. * ChainBuffer (DMA) virt/phys addresses. @@ -664,8 +640,11 @@ typedef struct _MPT_ADAPTER RaidCfgData raid_data; /* Raid config. data */ SasCfgData sas_data; /* Sas config. data */ FcCfgData fc_data; /* Fc config. data */ + MPT_IOCTL *ioctl; /* ioctl data pointer */ struct proc_dir_entry *ioc_dentry; struct _MPT_ADAPTER *alt_ioc; /* ptr to 929 bound adapter port */ + spinlock_t diagLock; /* diagnostic reset lock */ + int diagPending; u32 biosVersion; /* BIOS version from IO Unit Page 2 */ int eventTypes; /* Event logging parameters */ int eventContext; /* Next event context */ @@ -673,6 +652,7 @@ typedef struct _MPT_ADAPTER struct _mpt_ioctl_events *events; /* pointer to event log */ u8 *cached_fw; /* Pointer to FW */ dma_addr_t cached_fw_dma; + struct list_head configQ; /* linked list of config. 
requests */ int hs_reply_idx; #ifndef MFCNT u32 pad0; @@ -685,6 +665,9 @@ typedef struct _MPT_ADAPTER IOCFactsReply_t facts; PortFactsReply_t pfacts[2]; FCPortPage0_t fc_port_page0[2]; + struct timer_list persist_timer; /* persist table timer */ + int persist_wait_done; /* persist completion flag */ + u8 persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */ LANPage0_t lan_cnfg_page0; LANPage1_t lan_cnfg_page1; @@ -699,44 +682,23 @@ typedef struct _MPT_ADAPTER int aen_event_read_flag; /* flag to indicate event log was read*/ u8 FirstWhoInit; u8 upload_fw; /* If set, do a fw upload */ + u8 reload_fw; /* Force a FW Reload on next reset */ u8 NBShiftFactor; /* NB Shift Factor based on Block Size (Facts) */ u8 pad1[4]; u8 DoneCtx; u8 TaskCtx; u8 InternalCtx; + spinlock_t initializing_hba_lock; + int initializing_hba_lock_flag; struct list_head list; struct net_device *netdev; struct list_head sas_topology; struct mutex sas_topology_mutex; - - struct workqueue_struct *fw_event_q; - struct list_head fw_event_list; - spinlock_t fw_event_lock; - u8 fw_events_off; /* if '1', then ignore events */ - char fw_event_q_name[MPT_KOBJ_NAME_LEN]; - struct mutex sas_discovery_mutex; u8 sas_discovery_runtime; u8 sas_discovery_ignore_events; - - /* port_info object for the host */ - struct mptsas_portinfo *hba_port_info; - u64 hba_port_sas_addr; - u16 hba_port_num_phy; - struct list_head sas_device_info_list; - struct mutex sas_device_info_mutex; - u8 old_sas_discovery_protocal; - u8 sas_discovery_quiesce_io; int sas_index; /* index refrencing */ - MPT_MGMT sas_mgmt; - MPT_MGMT mptbase_cmds; /* for sending config pages */ - MPT_MGMT internal_cmds; - MPT_MGMT taskmgmt_cmds; - MPT_MGMT ioctl_cmds; - spinlock_t taskmgmt_lock; /* diagnostic reset lock */ - int taskmgmt_in_progress; - u8 taskmgmt_quiesce_io; - u8 ioc_reset_in_progress; + MPT_SAS_MGMT sas_mgmt; struct work_struct sas_persist_task; struct work_struct fc_setup_reset_work; @@ -745,27 +707,15 @@ typedef struct _MPT_ADAPTER u8 fc_link_speed[2]; spinlock_t fc_rescan_work_lock; struct work_struct fc_rescan_work; - char fc_rescan_work_q_name[MPT_KOBJ_NAME_LEN]; + char fc_rescan_work_q_name[20]; struct workqueue_struct *fc_rescan_work_q; - - /* driver forced bus resets count */ - unsigned long hard_resets; - /* fw/external bus resets count */ - unsigned long soft_resets; - /* cmd timeouts */ - unsigned long timeouts; - struct scsi_cmnd **ScsiLookup; spinlock_t scsi_lookup_lock; - u64 dma_mask; - u32 broadcast_aen_busy; - char reset_work_q_name[MPT_KOBJ_NAME_LEN]; + + char reset_work_q_name[20]; struct workqueue_struct *reset_work_q; struct delayed_work fault_reset_work; - - u8 sg_addr_size; - u8 in_rescan; - u8 SGE_size; + spinlock_t fault_reset_work_lock; } MPT_ADAPTER; @@ -803,14 +753,13 @@ typedef struct _mpt_sge { dma_addr_t Address; } MptSge_t; +#define mpt_addr_size() \ + ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SGE_FLAGS_64_BIT_ADDRESSING : \ + MPI_SGE_FLAGS_32_BIT_ADDRESSING) -#define mpt_msg_flags(ioc) \ - (ioc->sg_addr_size == sizeof(u64)) ? \ - MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \ - MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32 - -#define MPT_SGE_FLAGS_64_BIT_ADDRESSING \ - (MPI_SGE_FLAGS_64_BIT_ADDRESSING << MPI_SGE_FLAGS_SHIFT) +#define mpt_msg_flags() \ + ((sizeof(dma_addr_t) == sizeof(u64)) ? 
MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \ + MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -886,14 +835,22 @@ typedef struct _MPT_SCSI_HOST { /* Pool of memory for holding SCpnts before doing * OS callbacks. freeQ is the free pool. */ + u8 tmPending; + u8 resetPending; u8 negoNvram; /* DV disabled, nego NVRAM */ u8 pad1; + u8 tmState; u8 rsvd[2]; MPT_FRAME_HDR *cmdPtr; /* Ptr to nonOS request */ struct scsi_cmnd *abortSCpnt; MPT_LOCAL_REPLY localReply; /* internal cmd reply struct */ + unsigned long hard_resets; /* driver forced bus resets count */ + unsigned long soft_resets; /* fw/external bus resets count */ + unsigned long timeouts; /* cmd timeouts */ ushort sel_timeout[MPT_MAX_FC_DEVICES]; char *info_kbuf; + wait_queue_head_t scandv_waitq; + int scandv_wait_done; long last_queue_full; u16 tm_iocstatus; u16 spi_pending; @@ -913,16 +870,21 @@ struct scsi_cmnd; * Generic structure passed to the base mpt_config function. */ typedef struct _x_config_parms { + struct list_head linkage; /* linked list */ + struct timer_list timer; /* timer function for this request */ union { ConfigExtendedPageHeader_t *ehdr; ConfigPageHeader_t *hdr; } cfghdr; dma_addr_t physAddr; + int wait_done; /* wait for this request */ u32 pageAddr; /* properly formatted */ - u16 status; u8 action; u8 dir; u8 timeout; /* seconds */ + u8 pad1; + u16 status; + u16 pad2; } CONFIGPARMS; /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -947,6 +909,7 @@ extern MPT_FRAME_HDR *mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc); extern void mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); extern void mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); extern void mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf); +extern void mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr); extern int mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag); extern int mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp); @@ -959,12 +922,6 @@ extern void mpt_free_fw_memory(MPT_ADAPTER *ioc); extern int mpt_findImVolumes(MPT_ADAPTER *ioc); extern int mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode); extern int mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk); -extern int mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num, - pRaidPhysDiskPage1_t phys_disk); -extern int mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, - u8 phys_disk_num); -extern int mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc); -extern void mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc); extern void mpt_halt_firmware(MPT_ADAPTER *ioc); @@ -1002,6 +959,7 @@ extern int mpt_fwfault_debug; #define MPT_SGE_FLAGS_END_OF_BUFFER (0x40000000) #define MPT_SGE_FLAGS_LOCAL_ADDRESS (0x08000000) #define MPT_SGE_FLAGS_DIRECTION (0x04000000) +#define MPT_SGE_FLAGS_ADDRESSING (mpt_addr_size() << MPI_SGE_FLAGS_SHIFT) #define MPT_SGE_FLAGS_END_OF_LIST (0x01000000) #define MPT_SGE_FLAGS_TRANSACTION_ELEMENT (0x00000000) @@ -1014,12 +972,14 @@ extern int mpt_fwfault_debug; MPT_SGE_FLAGS_END_OF_BUFFER | \ MPT_SGE_FLAGS_END_OF_LIST | \ MPT_SGE_FLAGS_SIMPLE_ELEMENT | \ + MPT_SGE_FLAGS_ADDRESSING | \ MPT_TRANSFER_IOC_TO_HOST) #define MPT_SGE_FLAGS_SSIMPLE_WRITE \ (MPT_SGE_FLAGS_LAST_ELEMENT | \ MPT_SGE_FLAGS_END_OF_BUFFER | \ MPT_SGE_FLAGS_END_OF_LIST | \ MPT_SGE_FLAGS_SIMPLE_ELEMENT | \ + MPT_SGE_FLAGS_ADDRESSING | \ 
MPT_TRANSFER_HOST_TO_IOC) /*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ diff --git a/trunk/drivers/message/fusion/mptctl.c b/trunk/drivers/message/fusion/mptctl.c index 9b2e2198aee9..c63817117c0a 100644 --- a/trunk/drivers/message/fusion/mptctl.c +++ b/trunk/drivers/message/fusion/mptctl.c @@ -84,7 +84,6 @@ MODULE_VERSION(my_VERSION); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS; -static u8 mptctl_taskmgmt_id = MPT_MAX_PROTOCOL_DRIVERS; static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait ); @@ -128,7 +127,10 @@ static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc); static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTER *ioc); -static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function); +static void mptctl_timeout_expired (MPT_IOCTL *ioctl); +static int mptctl_bus_reset(MPT_IOCTL *ioctl); +static int mptctl_set_tm_flags(MPT_SCSI_HOST *hd); +static void mptctl_free_tm_flags(MPT_ADAPTER *ioc); /* * Reset Handler cleanup function @@ -181,10 +183,10 @@ mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock) int rc = 0; if (nonblock) { - if (!mutex_trylock(&ioc->ioctl_cmds.mutex)) + if (!mutex_trylock(&ioc->ioctl->ioctl_mutex)) rc = -EAGAIN; } else { - if (mutex_lock_interruptible(&ioc->ioctl_cmds.mutex)) + if (mutex_lock_interruptible(&ioc->ioctl->ioctl_mutex)) rc = -ERESTARTSYS; } return rc; @@ -200,78 +202,99 @@ mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock) static int mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) { - char *sense_data; - int req_index; - int sz; + char *sense_data; + int sz, req_index; + u16 iocStatus; + u8 cmd; - if (!req) - return 0; + if (req) + cmd = req->u.hdr.Function; + else + return 1; + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tcompleting mpi function (0x%02X), req=%p, " + "reply=%p\n", ioc->name, req->u.hdr.Function, req, reply)); - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "completing mpi function " - "(0x%02X), req=%p, reply=%p\n", ioc->name, req->u.hdr.Function, - req, reply)); + if (ioc->ioctl) { - /* - * Handling continuation of the same reply. Processing the first - * reply, and eating the other replys that come later. 
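
mpt_addr_size() and mpt_msg_flags(), reintroduced in the header hunks above, pick 32- or 64-bit SGE addressing purely from sizeof(dma_addr_t) at compile time. A sketch of the same dispatch with stand-in types and flag values:

/* Sketch only: dma_addr_t and the flag constants are placeholders. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;               /* pretend this is a 64-bit DMA build */

#define FLAGS_32_BIT_ADDRESSING 0x00
#define FLAGS_64_BIT_ADDRESSING 0x01

#define addr_size() \
    ((sizeof(dma_addr_t) == sizeof(uint64_t)) ? \
     FLAGS_64_BIT_ADDRESSING : FLAGS_32_BIT_ADDRESSING)

int main(void)
{
    /* each simple SGE costs one address plus one FlagsLength word */
    printf("addressing flag: %d (SGE is %zu bytes)\n",
           addr_size(), sizeof(dma_addr_t) + sizeof(uint32_t));
    return 0;
}
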
- */ - if (ioc->ioctl_cmds.msg_context != req->u.hdr.MsgContext) - goto out_continuation; + if (reply==NULL) { - ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_reply() NULL Reply " + "Function=%x!\n", ioc->name, cmd)); - if (!reply) - goto out; + ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; + ioc->ioctl->reset &= ~MPTCTL_RESET_OK; - ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_RF_VALID; - sz = min(ioc->reply_sz, 4*reply->u.reply.MsgLength); - memcpy(ioc->ioctl_cmds.reply, reply, sz); + /* We are done, issue wake up + */ + ioc->ioctl->wait_done = 1; + wake_up (&mptctl_wait); + return 1; - if (reply->u.reply.IOCStatus || reply->u.reply.IOCLogInfo) - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "iocstatus (0x%04X), loginfo (0x%08X)\n", ioc->name, - le16_to_cpu(reply->u.reply.IOCStatus), - le32_to_cpu(reply->u.reply.IOCLogInfo))); - - if ((req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) || - (req->u.hdr.Function == - MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { - - if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState) - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "scsi_status (0x%02x), scsi_state (0x%02x), " - "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name, - reply->u.sreply.SCSIStatus, - reply->u.sreply.SCSIState, - le16_to_cpu(reply->u.sreply.TaskTag), - le32_to_cpu(reply->u.sreply.TransferCount))); - - if (reply->u.sreply.SCSIState & - MPI_SCSI_STATE_AUTOSENSE_VALID) { + } + + /* Copy the reply frame (which much exist + * for non-SCSI I/O) to the IOC structure. + */ + memcpy(ioc->ioctl->ReplyFrame, reply, + min(ioc->reply_sz, 4*reply->u.reply.MsgLength)); + ioc->ioctl->status |= MPT_IOCTL_STATUS_RF_VALID; + + /* Set the command status to GOOD if IOC Status is GOOD + * OR if SCSI I/O cmd and data underrun or recovered error. 
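
Two details of mptctl_reply() worth noting: MsgLength counts 32-bit words, so the cached copy is 4*MsgLength bytes clamped to the reply buffer, and SCSI I/O is treated as GOOD on data underrun or recovered error as well as plain success. A sketch with invented sizes and status codes:

/* Sketch only: REPLY_SZ and the ST_* codes are not the MPI values. */
#include <stdio.h>
#include <string.h>

#define REPLY_SZ 48
enum { ST_SUCCESS, ST_UNDERRUN, ST_RECOVERED, ST_OTHER };

static unsigned cache_reply(unsigned char *dst, const unsigned char *src,
                            unsigned msg_len_words)
{
    unsigned sz = 4 * msg_len_words;       /* MsgLength is in 32-bit words */

    if (sz > REPLY_SZ)
        sz = REPLY_SZ;                     /* never overrun the cached copy */
    memcpy(dst, src, sz);
    return sz;
}

static int command_good(int ioc_status, int is_scsi_io)
{
    return ioc_status == ST_SUCCESS ||
           (is_scsi_io &&
            (ioc_status == ST_UNDERRUN || ioc_status == ST_RECOVERED));
}

int main(void)
{
    unsigned char frame[64] = { 0 }, cached[REPLY_SZ];

    printf("cached %u bytes\n", cache_reply(cached, frame, 15));
    printf("underrun good? %d\n", command_good(ST_UNDERRUN, 1));
    return 0;
}
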
+ */ + iocStatus = le16_to_cpu(reply->u.reply.IOCStatus) & MPI_IOCSTATUS_MASK; + if (iocStatus == MPI_IOCSTATUS_SUCCESS) + ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; + + if (iocStatus || reply->u.reply.IOCLogInfo) + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tiocstatus (0x%04X), " + "loginfo (0x%08X)\n", ioc->name, + iocStatus, + le32_to_cpu(reply->u.reply.IOCLogInfo))); + + if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) || + (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { + + if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState) + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "\tscsi_status (0x%02x), scsi_state (0x%02x), " + "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name, + reply->u.sreply.SCSIStatus, + reply->u.sreply.SCSIState, + le16_to_cpu(reply->u.sreply.TaskTag), + le32_to_cpu(reply->u.sreply.TransferCount))); + + ioc->ioctl->reset &= ~MPTCTL_RESET_OK; + + if ((iocStatus == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN) || + (iocStatus == MPI_IOCSTATUS_SCSI_RECOVERED_ERROR)) { + ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD; + } + } + + /* Copy the sense data - if present + */ + if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) && + (reply->u.sreply.SCSIState & + MPI_SCSI_STATE_AUTOSENSE_VALID)){ sz = req->u.scsireq.SenseBufferLength; req_index = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx); - sense_data = ((u8 *)ioc->sense_buf_pool + + sense_data = + ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC)); - memcpy(ioc->ioctl_cmds.sense, sense_data, sz); - ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_SENSE_VALID; + memcpy(ioc->ioctl->sense, sense_data, sz); + ioc->ioctl->status |= MPT_IOCTL_STATUS_SENSE_VALID; } - } - out: - /* We are done, issue wake up - */ - if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) { - if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT) - mpt_clear_taskmgmt_in_progress_flag(ioc); - ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING; - complete(&ioc->ioctl_cmds.done); - } + if (cmd == MPI_FUNCTION_SCSI_TASK_MGMT) + mptctl_free_tm_flags(ioc); - out_continuation: - if (reply && (reply->u.reply.MsgFlags & - MPI_MSGFLAGS_CONTINUATION_REPLY)) - return 0; + /* We are done, issue wake up + */ + ioc->ioctl->wait_done = 1; + wake_up (&mptctl_wait); + } return 1; } @@ -281,66 +304,30 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) * Expecting an interrupt, however timed out. * */ -static void -mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf) +static void mptctl_timeout_expired (MPT_IOCTL *ioctl) { - unsigned long flags; - - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n", - ioc->name, __func__)); - - if (mpt_fwfault_debug) - mpt_halt_firmware(ioc); - - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); - if (ioc->ioc_reset_in_progress) { - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); - CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status) - mpt_free_msg_frame(ioc, mf); - return; - } - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + int rc = 1; - - if (!mptctl_bus_reset(ioc, mf->u.hdr.Function)) + if (ioctl == NULL) return; + dctlprintk(ioctl->ioc, + printk(MYIOC_s_DEBUG_FMT ": Timeout Expired! Host %d\n", + ioctl->ioc->name, ioctl->ioc->id)); - /* Issue a reset for this device. - * The IOC is not responding. - */ - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! 
\n", - ioc->name)); - CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status) - mpt_HardResetHandler(ioc, CAN_SLEEP); - mpt_free_msg_frame(ioc, mf); -} + ioctl->wait_done = 0; + if (ioctl->reset & MPTCTL_RESET_OK) + rc = mptctl_bus_reset(ioctl); -static int -mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) -{ - if (!mf) - return 0; - - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "TaskMgmt completed (mf=%p, mr=%p)\n", - ioc->name, mf, mr)); - - ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; - - if (!mr) - goto out; - - ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID; - memcpy(ioc->taskmgmt_cmds.reply, mr, - min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength)); - out: - if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) { - mpt_clear_taskmgmt_in_progress_flag(ioc); - ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING; - complete(&ioc->taskmgmt_cmds.done); - return 1; + if (rc) { + /* Issue a reset for this device. + * The IOC is not responding. + */ + dctlprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n", + ioctl->ioc->name)); + mpt_HardResetHandler(ioctl->ioc, CAN_SLEEP); } - return 0; + return; + } /* mptctl_bus_reset @@ -348,150 +335,133 @@ mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) * Bus reset code. * */ -static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function) +static int mptctl_bus_reset(MPT_IOCTL *ioctl) { MPT_FRAME_HDR *mf; SCSITaskMgmt_t *pScsiTm; - SCSITaskMgmtReply_t *pScsiTmReply; + MPT_SCSI_HOST *hd; int ii; - int retval; - unsigned long timeout; - unsigned long time_count; - u16 iocstatus; - - /* bus reset is only good for SCSI IO, RAID PASSTHRU */ - if (!(function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) || - (function == MPI_FUNCTION_SCSI_IO_REQUEST)) { - dtmprintk(ioc, printk(MYIOC_s_WARN_FMT - "TaskMgmt, not SCSI_IO!!\n", ioc->name)); + int retval=0; + + + ioctl->reset &= ~MPTCTL_RESET_OK; + + if (ioctl->ioc->sh == NULL) return -EPERM; - } - mutex_lock(&ioc->taskmgmt_cmds.mutex); - if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { - mutex_unlock(&ioc->taskmgmt_cmds.mutex); + hd = shost_priv(ioctl->ioc->sh); + if (hd == NULL) return -EPERM; - } - retval = 0; + /* Single threading .... 
+ */ + if (mptctl_set_tm_flags(hd) != 0) + return -EPERM; /* Send request */ - mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc); - if (mf == NULL) { - dtmprintk(ioc, printk(MYIOC_s_WARN_FMT - "TaskMgmt, no msg frames!!\n", ioc->name)); - mpt_clear_taskmgmt_in_progress_flag(ioc); - retval = -ENOMEM; - goto mptctl_bus_reset_done; + if ((mf = mpt_get_msg_frame(mptctl_id, ioctl->ioc)) == NULL) { + dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt, no msg frames!!\n", + ioctl->ioc->name)); + + mptctl_free_tm_flags(ioctl->ioc); + return -ENOMEM; } - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n", - ioc->name, mf)); + dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n", + ioctl->ioc->name, mf)); pScsiTm = (SCSITaskMgmt_t *) mf; - memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t)); - pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; - pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; - pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION; - pScsiTm->TargetID = 0; - pScsiTm->Bus = 0; + pScsiTm->TargetID = ioctl->id; + pScsiTm->Bus = hd->port; /* 0 */ pScsiTm->ChainOffset = 0; + pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT; pScsiTm->Reserved = 0; + pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS; pScsiTm->Reserved1 = 0; - pScsiTm->TaskMsgContext = 0; + pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION; + for (ii= 0; ii < 8; ii++) pScsiTm->LUN[ii] = 0; + for (ii=0; ii < 7; ii++) pScsiTm->Reserved2[ii] = 0; - switch (ioc->bus_type) { - case FC: - timeout = 40; - break; - case SAS: - timeout = 30; - break; - case SPI: - default: - timeout = 2; - break; - } + pScsiTm->TaskMsgContext = 0; + dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT + "mptctl_bus_reset: issued.\n", ioctl->ioc->name)); - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "TaskMgmt type=%d timeout=%ld\n", - ioc->name, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, timeout)); + DBG_DUMP_TM_REQUEST_FRAME(ioctl->ioc, (u32 *)mf); - INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status) - CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status) - time_count = jiffies; - if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && - (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) - mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf); + ioctl->wait_done=0; + + if ((ioctl->ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && + (ioctl->ioc->facts.MsgVersion >= MPI_VERSION_01_05)) + mpt_put_msg_frame_hi_pri(mptctl_id, ioctl->ioc, mf); else { - retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc, - sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP); + retval = mpt_send_handshake_request(mptctl_id, ioctl->ioc, + sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP); if (retval != 0) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "TaskMgmt send_handshake FAILED!" - " (ioc %p, mf %p, rc=%d) \n", ioc->name, - ioc, mf, retval)); - mpt_clear_taskmgmt_in_progress_flag(ioc); + dfailprintk(ioctl->ioc, printk(MYIOC_s_ERR_FMT "_send_handshake FAILED!" 
+ " (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd, + hd->ioc, mf)); goto mptctl_bus_reset_done; } } /* Now wait for the command to complete */ - ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ); - if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "TaskMgmt failed\n", ioc->name)); - mpt_free_msg_frame(ioc, mf); - mpt_clear_taskmgmt_in_progress_flag(ioc); - if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) - retval = 0; - else - retval = -1; /* return failure */ - goto mptctl_bus_reset_done; - } + ii = wait_event_timeout(mptctl_wait, + ioctl->wait_done == 1, + HZ*5 /* 5 second timeout */); - if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "TaskMgmt failed\n", ioc->name)); - retval = -1; /* return failure */ - goto mptctl_bus_reset_done; - } - - pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply; - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, " - "iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, " - "term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus, - pScsiTmReply->TargetID, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, - le16_to_cpu(pScsiTmReply->IOCStatus), - le32_to_cpu(pScsiTmReply->IOCLogInfo), - pScsiTmReply->ResponseCode, - le32_to_cpu(pScsiTmReply->TerminationCount))); - - iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK; - - if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED || - iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED || - iocstatus == MPI_IOCSTATUS_SUCCESS) - retval = 0; - else { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "TaskMgmt failed\n", ioc->name)); + if(ii <=0 && (ioctl->wait_done != 1 )) { + mpt_free_msg_frame(hd->ioc, mf); + ioctl->wait_done = 0; retval = -1; /* return failure */ } +mptctl_bus_reset_done: - mptctl_bus_reset_done: - mutex_unlock(&ioc->taskmgmt_cmds.mutex); - CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status) + mptctl_free_tm_flags(ioctl->ioc); return retval; } +static int +mptctl_set_tm_flags(MPT_SCSI_HOST *hd) { + unsigned long flags; + + spin_lock_irqsave(&hd->ioc->FreeQlock, flags); + + if (hd->tmState == TM_STATE_NONE) { + hd->tmState = TM_STATE_IN_PROGRESS; + hd->tmPending = 1; + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + } else { + spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags); + return -EBUSY; + } + + return 0; +} + +static void +mptctl_free_tm_flags(MPT_ADAPTER *ioc) +{ + MPT_SCSI_HOST * hd; + unsigned long flags; + + hd = shost_priv(ioc->sh); + if (hd == NULL) + return; + + spin_lock_irqsave(&ioc->FreeQlock, flags); + + hd->tmState = TM_STATE_NONE; + hd->tmPending = 0; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + + return; +} /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* mptctl_ioc_reset @@ -503,23 +473,22 @@ static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function) static int mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) { + MPT_IOCTL *ioctl = ioc->ioctl; + dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOC %s_reset routed to IOCTL driver!\n", ioc->name, + reset_phase==MPT_IOC_SETUP_RESET ? "setup" : ( + reset_phase==MPT_IOC_PRE_RESET ? 
"pre" : "post"))); + + if(ioctl == NULL) + return 1; + switch(reset_phase) { case MPT_IOC_SETUP_RESET: - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__)); - break; - case MPT_IOC_PRE_RESET: - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__)); + ioctl->status |= MPT_IOCTL_STATUS_DID_IOCRESET; break; case MPT_IOC_POST_RESET: - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__)); - if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) { - ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_DID_IOCRESET; - complete(&ioc->ioctl_cmds.done); - } + ioctl->status &= ~MPT_IOCTL_STATUS_DID_IOCRESET; break; + case MPT_IOC_PRE_RESET: default: break; } @@ -673,7 +642,7 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) else ret = -EINVAL; - mutex_unlock(&iocp->ioctl_cmds.mutex); + mutex_unlock(&iocp->ioctl->ioctl_mutex); return ret; } @@ -789,7 +758,6 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) int sge_offset = 0; u16 iocstat; pFWDownloadReply_t ReplyMsg = NULL; - unsigned long timeleft; if (mpt_verify_adapter(ioc, &iocp) < 0) { printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n", @@ -873,9 +841,8 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) * 96 8 * 64 4 */ - maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - - sizeof(FWDownloadTCSGE_t)) - / iocp->SGE_size; + maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t)) + / (sizeof(dma_addr_t) + sizeof(u32)); if (numfrags > maxfrags) { ret = -EMLINK; goto fwdl_out; @@ -903,7 +870,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) if (nib == 0 || nib == 3) { ; } else if (sgIn->Address) { - iocp->add_sge(sgOut, sgIn->FlagsLength, sgIn->Address); + mpt_add_sge(sgOut, sgIn->FlagsLength, sgIn->Address); n++; if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) { printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - " @@ -915,7 +882,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) } sgIn++; bl++; - sgOut += iocp->SGE_size; + sgOut += (sizeof(dma_addr_t) + sizeof(u32)); } DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags); @@ -924,30 +891,16 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) * Finally, perform firmware download. 
*/ ReplyMsg = NULL; - SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, dlmsg->MsgContext); - INITIALIZE_MGMT_STATUS(iocp->ioctl_cmds.status) mpt_put_msg_frame(mptctl_id, iocp, mf); /* Now wait for the command to complete */ -retry_wait: - timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60); - if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { - ret = -ETIME; - printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__); - if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { - mpt_free_msg_frame(iocp, mf); - goto fwdl_out; - } - if (!timeleft) - mptctl_timeout_expired(iocp, mf); - else - goto retry_wait; - goto fwdl_out; - } + ret = wait_event_timeout(mptctl_wait, + iocp->ioctl->wait_done == 1, + HZ*60); - if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { - printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__); - mpt_free_msg_frame(iocp, mf); + if(ret <=0 && (iocp->ioctl->wait_done != 1 )) { + /* Now we need to reset the board */ + mptctl_timeout_expired(iocp->ioctl); ret = -ENODATA; goto fwdl_out; } @@ -955,7 +908,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) if (sgl) kfree_sgl(sgl, sgl_dma, buflist, iocp); - ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply; + ReplyMsg = (pFWDownloadReply_t)iocp->ioctl->ReplyFrame; iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK; if (iocstat == MPI_IOCSTATUS_SUCCESS) { printk(MYIOC_s_INFO_FMT "F/W update successfull!\n", iocp->name); @@ -979,9 +932,6 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen) return 0; fwdl_out: - - CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status); - SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0); kfree_sgl(sgl, sgl_dma, buflist, iocp); return ret; } @@ -1053,7 +1003,7 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags, * */ sgl = sglbuf; - sg_spill = ((ioc->req_sz - sge_offset)/ioc->SGE_size) - 1; + sg_spill = ((ioc->req_sz - sge_offset)/(sizeof(dma_addr_t) + sizeof(u32))) - 1; while (bytes_allocd < bytes) { this_alloc = min(alloc_sz, bytes-bytes_allocd); buflist[buflist_ent].len = this_alloc; @@ -1074,9 +1024,8 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags, dma_addr_t dma_addr; bytes_allocd += this_alloc; - sgl->FlagsLength = (0x10000000|sgdir|this_alloc); - dma_addr = pci_map_single(ioc->pcidev, - buflist[buflist_ent].kptr, this_alloc, dir); + sgl->FlagsLength = (0x10000000|MPT_SGE_FLAGS_ADDRESSING|sgdir|this_alloc); + dma_addr = pci_map_single(ioc->pcidev, buflist[buflist_ent].kptr, this_alloc, dir); sgl->Address = dma_addr; fragcnt++; @@ -1822,10 +1771,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) int msgContext; u16 req_idx; ulong timeout; - unsigned long timeleft; struct scsi_device *sdev; - unsigned long flags; - u8 function; /* bufIn and bufOut are used for user to kernel space transfers */ @@ -1838,23 +1784,24 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) __FILE__, __LINE__, iocnum); return -ENODEV; } - - spin_lock_irqsave(&ioc->taskmgmt_lock, flags); - if (ioc->ioc_reset_in_progress) { - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); + if (!ioc->ioctl) { printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - " - "Busy with diagnostic reset\n", __FILE__, __LINE__); + "No memory available during driver init.\n", + __FILE__, __LINE__); + return -ENOMEM; + } else if (ioc->ioctl->status & MPT_IOCTL_STATUS_DID_IOCRESET) { + printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - " + "Busy with 
IOC Reset \n", __FILE__, __LINE__); return -EBUSY; } - spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); /* Verify that the final request frame will not be too large. */ sz = karg.dataSgeOffset * 4; if (karg.dataInSize > 0) - sz += ioc->SGE_size; + sz += sizeof(dma_addr_t) + sizeof(u32); if (karg.dataOutSize > 0) - sz += ioc->SGE_size; + sz += sizeof(dma_addr_t) + sizeof(u32); if (sz > ioc->req_sz) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " @@ -1880,12 +1827,10 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Unable to read MF from mpt_ioctl_command struct @ %p\n", ioc->name, __FILE__, __LINE__, mfPtr); - function = -1; rc = -EFAULT; goto done_free_mem; } hdr->MsgContext = cpu_to_le32(msgContext); - function = hdr->Function; /* Verify that this request is allowed. @@ -1893,7 +1838,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n", ioc->name, hdr->Function, mf)); - switch (function) { + switch (hdr->Function) { case MPI_FUNCTION_IOC_FACTS: case MPI_FUNCTION_PORT_FACTS: karg.dataOutSize = karg.dataInSize = 0; @@ -1948,7 +1893,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) } pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; - pScsiReq->MsgFlags |= mpt_msg_flags(ioc); + pScsiReq->MsgFlags |= mpt_msg_flags(); /* verify that app has not requested @@ -1990,6 +1935,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) pScsiReq->Control = cpu_to_le32(scsidir | qtag); pScsiReq->DataLength = cpu_to_le32(dataSize); + ioc->ioctl->reset = MPTCTL_RESET_OK; + ioc->ioctl->id = pScsiReq->TargetID; } else { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " @@ -2032,7 +1979,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) int dataSize; pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH; - pScsiReq->MsgFlags |= mpt_msg_flags(ioc); + pScsiReq->MsgFlags |= mpt_msg_flags(); /* verify that app has not requested @@ -2067,6 +2014,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) pScsiReq->Control = cpu_to_le32(scsidir | qtag); pScsiReq->DataLength = cpu_to_le32(dataSize); + ioc->ioctl->reset = MPTCTL_RESET_OK; + ioc->ioctl->id = pScsiReq->TargetID; } else { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "SCSI driver is not loaded. \n", @@ -2077,17 +2026,20 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) break; case MPI_FUNCTION_SCSI_TASK_MGMT: - { - SCSITaskMgmt_t *pScsiTm; - pScsiTm = (SCSITaskMgmt_t *)mf; - dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "\tTaskType=0x%x MsgFlags=0x%x " - "TaskMsgContext=0x%x id=%d channel=%d\n", - ioc->name, pScsiTm->TaskType, le32_to_cpu - (pScsiTm->TaskMsgContext), pScsiTm->MsgFlags, - pScsiTm->TargetID, pScsiTm->Bus)); + { + MPT_SCSI_HOST *hd = NULL; + if ((ioc->sh == NULL) || ((hd = shost_priv(ioc->sh)) == NULL)) { + printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " + "SCSI driver not loaded or SCSI host not found. 
\n", + ioc->name, __FILE__, __LINE__); + rc = -EFAULT; + goto done_free_mem; + } else if (mptctl_set_tm_flags(hd) != 0) { + rc = -EPERM; + goto done_free_mem; + } + } break; - } case MPI_FUNCTION_IOC_INIT: { @@ -2171,7 +2123,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) if (karg.dataInSize > 0) { flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | - MPI_SGE_FLAGS_DIRECTION) + MPI_SGE_FLAGS_DIRECTION | + mpt_addr_size() ) << MPI_SGE_FLAGS_SHIFT; } else { flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE; @@ -2188,8 +2141,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) /* Set up this SGE. * Copy to MF and to sglbuf */ - ioc->add_sge(psge, flagsLength, dma_addr_out); - psge += ioc->SGE_size; + mpt_add_sge(psge, flagsLength, dma_addr_out); + psge += (sizeof(u32) + sizeof(dma_addr_t)); /* Copy user data to kernel space. */ @@ -2222,25 +2175,18 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) /* Set up this SGE * Copy to MF and to sglbuf */ - ioc->add_sge(psge, flagsLength, dma_addr_in); + mpt_add_sge(psge, flagsLength, dma_addr_in); } } } else { /* Add a NULL SGE */ - ioc->add_sge(psge, flagsLength, (dma_addr_t) -1); + mpt_add_sge(psge, flagsLength, (dma_addr_t) -1); } - SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext); - INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status) + ioc->ioctl->wait_done = 0; if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) { - mutex_lock(&ioc->taskmgmt_cmds.mutex); - if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { - mutex_unlock(&ioc->taskmgmt_cmds.mutex); - goto done_free_mem; - } - DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf); if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && @@ -2251,11 +2197,10 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP); if (rc != 0) { dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "send_handshake FAILED! (ioc %p, mf %p)\n", + "_send_handshake FAILED! (ioc %p, mf %p)\n", ioc->name, ioc, mf)); - mpt_clear_taskmgmt_in_progress_flag(ioc); + mptctl_free_tm_flags(ioc); rc = -ENODATA; - mutex_unlock(&ioc->taskmgmt_cmds.mutex); goto done_free_mem; } } @@ -2265,47 +2210,36 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) /* Now wait for the command to complete */ timeout = (karg.timeout > 0) ? 
karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT; -retry_wait: - timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, - HZ*timeout); - if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { - rc = -ETIME; - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "%s: TIMED OUT!\n", - ioc->name, __func__)); - if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { - if (function == MPI_FUNCTION_SCSI_TASK_MGMT) - mutex_unlock(&ioc->taskmgmt_cmds.mutex); - goto done_free_mem; - } - if (!timeleft) { - if (function == MPI_FUNCTION_SCSI_TASK_MGMT) - mutex_unlock(&ioc->taskmgmt_cmds.mutex); - mptctl_timeout_expired(ioc, mf); - mf = NULL; - } else - goto retry_wait; - goto done_free_mem; - } + timeout = wait_event_timeout(mptctl_wait, + ioc->ioctl->wait_done == 1, + HZ*timeout); + + if(timeout <=0 && (ioc->ioctl->wait_done != 1 )) { + /* Now we need to reset the board */ - if (function == MPI_FUNCTION_SCSI_TASK_MGMT) - mutex_unlock(&ioc->taskmgmt_cmds.mutex); + if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) + mptctl_free_tm_flags(ioc); + mptctl_timeout_expired(ioc->ioctl); + rc = -ENODATA; + goto done_free_mem; + } mf = NULL; /* If a valid reply frame, copy to the user. * Offset 2: reply length in U32's */ - if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) { + if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) { if (karg.maxReplyBytes < ioc->reply_sz) { - sz = min(karg.maxReplyBytes, - 4*ioc->ioctl_cmds.reply[2]); + sz = min(karg.maxReplyBytes, 4*ioc->ioctl->ReplyFrame[2]); } else { - sz = min(ioc->reply_sz, 4*ioc->ioctl_cmds.reply[2]); + sz = min(ioc->reply_sz, 4*ioc->ioctl->ReplyFrame[2]); } + if (sz > 0) { if (copy_to_user(karg.replyFrameBufPtr, - ioc->ioctl_cmds.reply, sz)){ + &ioc->ioctl->ReplyFrame, sz)){ printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Unable to write out reply frame %p\n", @@ -2318,11 +2252,10 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) /* If valid sense data, copy to user. */ - if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_SENSE_VALID) { + if (ioc->ioctl->status & MPT_IOCTL_STATUS_SENSE_VALID) { sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE); if (sz > 0) { - if (copy_to_user(karg.senseDataPtr, - ioc->ioctl_cmds.sense, sz)) { + if (copy_to_user(karg.senseDataPtr, ioc->ioctl->sense, sz)) { printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - " "Unable to write sense data to user %p\n", ioc->name, __FILE__, __LINE__, @@ -2336,7 +2269,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) /* If the overall status is _GOOD and data in, copy data * to user. */ - if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD) && + if ((ioc->ioctl->status & MPT_IOCTL_STATUS_COMMAND_GOOD) && (karg.dataInSize > 0) && (bufIn.kptr)) { if (copy_to_user(karg.dataInBufPtr, @@ -2351,8 +2284,9 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr) done_free_mem: - CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status) - SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0); + ioc->ioctl->status &= ~(MPT_IOCTL_STATUS_COMMAND_GOOD | + MPT_IOCTL_STATUS_SENSE_VALID | + MPT_IOCTL_STATUS_RF_VALID ); /* Free the allocated memory. */ @@ -2402,8 +2336,6 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) ToolboxIstwiReadWriteRequest_t *IstwiRWRequest; MPT_FRAME_HDR *mf = NULL; MPIHeader_t *mpi_hdr; - unsigned long timeleft; - int retval; /* Reset long to int. 
Should affect IA64 and SPARC only */ @@ -2534,9 +2466,9 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) MPT_SCSI_HOST *hd = shost_priv(ioc->sh); if (hd && (cim_rev == 1)) { - karg.hard_resets = ioc->hard_resets; - karg.soft_resets = ioc->soft_resets; - karg.timeouts = ioc->timeouts; + karg.hard_resets = hd->hard_resets; + karg.soft_resets = hd->soft_resets; + karg.timeouts = hd->timeouts; } } @@ -2544,8 +2476,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) * Gather ISTWI(Industry Standard Two Wire Interface) Data */ if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) { - dfailprintk(ioc, printk(MYIOC_s_WARN_FMT - "%s, no msg frames!!\n", ioc->name, __func__)); + dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n", + ioc->name,__func__)); goto out; } @@ -2566,29 +2498,22 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma); if (!pbuf) goto out; - ioc->add_sge((char *)&IstwiRWRequest->SGL, + mpt_add_sge((char *)&IstwiRWRequest->SGL, (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma); - retval = 0; - SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, - IstwiRWRequest->MsgContext); - INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status) + ioc->ioctl->wait_done = 0; mpt_put_msg_frame(mptctl_id, ioc, mf); -retry_wait: - timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done, - HZ*MPT_IOCTL_DEFAULT_TIMEOUT); - if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { - retval = -ETIME; - printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__); - if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) { - mpt_free_msg_frame(ioc, mf); - goto out; - } - if (!timeleft) - mptctl_timeout_expired(ioc, mf); - else - goto retry_wait; + rc = wait_event_timeout(mptctl_wait, + ioc->ioctl->wait_done == 1, + HZ*MPT_IOCTL_DEFAULT_TIMEOUT /* 10 sec */); + + if(rc <=0 && (ioc->ioctl->wait_done != 1 )) { + /* + * Now we need to reset the board + */ + mpt_free_msg_frame(ioc, mf); + mptctl_timeout_expired(ioc->ioctl); goto out; } @@ -2601,13 +2526,10 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) * bays have drives in them * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3) */ - if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) + if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) karg.rsvd = *(u32 *)pbuf; out: - CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status) - SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0); - if (pbuf) pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma); @@ -2831,7 +2753,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd, ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen); - mutex_unlock(&iocp->ioctl_cmds.mutex); + mutex_unlock(&iocp->ioctl->ioctl_mutex); return ret; } @@ -2885,7 +2807,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd, */ ret = mptctl_do_mpt_command (karg, &uarg->MF); - mutex_unlock(&iocp->ioctl_cmds.mutex); + mutex_unlock(&iocp->ioctl->ioctl_mutex); return ret; } @@ -2937,10 +2859,21 @@ static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long a static int mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + MPT_IOCTL *mem; MPT_ADAPTER *ioc = pci_get_drvdata(pdev); - mutex_init(&ioc->ioctl_cmds.mutex); - init_completion(&ioc->ioctl_cmds.done); + /* + * Allocate and inite a MPT_IOCTL structure + */ + mem = kzalloc(sizeof(MPT_IOCTL), GFP_KERNEL); + if (!mem) { + mptctl_remove(pdev); + return -ENOMEM; + } + + ioc->ioctl = mem; + ioc->ioctl->ioc = ioc; + 
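The probe and remove hunks above reinstate the old per-adapter MPT_IOCTL block: mptctl_probe() kzalloc()s it, every ioctl path clears wait_done before posting a message frame and sleeps on the module-wide mptctl_wait queue, and the reply path sets wait_done and wakes the sleeper. A minimal sketch of that handshake follows; the demo_* names are hypothetical stand-ins, not the driver's real symbols.

    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/wait.h>

    /* Stand-in for MPT_IOCTL: one block per adapter, freed on remove. */
    struct demo_ioctl {
            struct mutex    lock;           /* like ioctl_mutex          */
            u8              wait_done;      /* set from the reply path   */
            u8              status;         /* COMMAND_GOOD etc. flags   */
    };

    static DECLARE_WAIT_QUEUE_HEAD(demo_wait);      /* like mptctl_wait */

    /* Reply path: flag completion, then wake the sleeping ioctl. */
    static void demo_reply_done(struct demo_ioctl *dio)
    {
            dio->wait_done = 1;
            wake_up(&demo_wait);
    }

    /* Request path: post the frame, then block for up to @secs seconds. */
    static int demo_issue_and_wait(struct demo_ioctl *dio, int secs)
    {
            long left;

            dio->wait_done = 0;
            /* ...mpt_put_msg_frame() would go here... */
            left = wait_event_timeout(demo_wait, dio->wait_done == 1,
                                      secs * HZ);
            if (left <= 0 && dio->wait_done != 1)
                    return -ENODATA;  /* timed out; caller resets the board */
            return 0;
    }

Because the wait queue is shared, the per-adapter wait_done flag is what ties a wake-up back to a particular request, which is why each submission path in the patch clears it immediately before mpt_put_msg_frame().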
mutex_init(&ioc->ioctl->ioctl_mutex); return 0; } @@ -2954,6 +2887,9 @@ mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id) static void mptctl_remove(struct pci_dev *pdev) { + MPT_ADAPTER *ioc = pci_get_drvdata(pdev); + + kfree ( ioc->ioctl ); } static struct mpt_pci_driver mptctl_driver = { @@ -2993,7 +2929,6 @@ static int __init mptctl_init(void) goto out_fail; } - mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER); mpt_reset_register(mptctl_id, mptctl_ioc_reset); mpt_event_register(mptctl_id, mptctl_event_process); @@ -3018,7 +2953,6 @@ static void mptctl_exit(void) /* De-register callback handler from base module */ mpt_deregister(mptctl_id); - mpt_reset_deregister(mptctl_taskmgmt_id); mpt_device_driver_deregister(MPTCTL_DRIVER); diff --git a/trunk/drivers/message/fusion/mptdebug.h b/trunk/drivers/message/fusion/mptdebug.h index 28e478879284..510b9f492093 100644 --- a/trunk/drivers/message/fusion/mptdebug.h +++ b/trunk/drivers/message/fusion/mptdebug.h @@ -58,7 +58,6 @@ #define MPT_DEBUG_FC 0x00080000 #define MPT_DEBUG_SAS 0x00100000 #define MPT_DEBUG_SAS_WIDE 0x00200000 -#define MPT_DEBUG_36GB_MEM 0x00400000 /* * CONFIG_FUSION_LOGGING - enabled in Kconfig @@ -136,8 +135,6 @@ #define dsaswideprintk(IOC, CMD) \ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE) -#define d36memprintk(IOC, CMD) \ - MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_36GB_MEM) /* diff --git a/trunk/drivers/message/fusion/mptfc.c b/trunk/drivers/message/fusion/mptfc.c index e61df133a59e..c3c24fdf9fb6 100644 --- a/trunk/drivers/message/fusion/mptfc.c +++ b/trunk/drivers/message/fusion/mptfc.c @@ -1251,15 +1251,17 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) * A slightly different algorithm is required for * 64bit SGEs. */ - scale = ioc->req_sz/ioc->SGE_size; - if (ioc->sg_addr_size == sizeof(u64)) { + scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); + if (sizeof(dma_addr_t) == sizeof(u64)) { numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + - (ioc->req_sz - 60) / ioc->SGE_size; + (ioc->req_sz - 60) / (sizeof(dma_addr_t) + + sizeof(u32)); } else { numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + - (ioc->req_sz - 64) / ioc->SGE_size; + (ioc->req_sz - 64) / (sizeof(dma_addr_t) + + sizeof(u32)); } if (numSGE < sh->sg_tablesize) { @@ -1290,6 +1292,9 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Clear the TM flags */ + hd->tmPending = 0; + hd->tmState = TM_STATE_NONE; + hd->resetPending = 0; hd->abortSCpnt = NULL; /* Clear the pointer used to store @@ -1307,6 +1312,8 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) hd->timer.data = (unsigned long) hd; hd->timer.function = mptscsih_timer_expired; + init_waitqueue_head(&hd->scandv_waitq); + hd->scandv_wait_done = 0; hd->last_queue_full = 0; sh->transportt = mptfc_transport_template; diff --git a/trunk/drivers/message/fusion/mptsas.c b/trunk/drivers/message/fusion/mptsas.c index 20e0b447e8e8..79f5433359f9 100644 --- a/trunk/drivers/message/fusion/mptsas.c +++ b/trunk/drivers/message/fusion/mptsas.c @@ -93,37 +93,8 @@ static u8 mptsasDoneCtx = MPT_MAX_PROTOCOL_DRIVERS; static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS; static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */ static u8 mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS; -static u8 mptsasDeviceResetCtx = MPT_MAX_PROTOCOL_DRIVERS; - -static void mptsas_firmware_event_work(struct work_struct *work); -static void mptsas_send_sas_event(struct fw_event_work 
*fw_event); -static void mptsas_send_raid_event(struct fw_event_work *fw_event); -static void mptsas_send_ir2_event(struct fw_event_work *fw_event); -static void mptsas_parse_device_info(struct sas_identify *identify, - struct mptsas_devinfo *device_info); -static inline void mptsas_set_rphy(MPT_ADAPTER *ioc, - struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy); -static struct mptsas_phyinfo *mptsas_find_phyinfo_by_sas_address - (MPT_ADAPTER *ioc, u64 sas_address); -static int mptsas_sas_device_pg0(MPT_ADAPTER *ioc, - struct mptsas_devinfo *device_info, u32 form, u32 form_specific); -static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, - struct mptsas_enclosure *enclosure, u32 form, u32 form_specific); -static int mptsas_add_end_device(MPT_ADAPTER *ioc, - struct mptsas_phyinfo *phy_info); -static void mptsas_del_end_device(MPT_ADAPTER *ioc, - struct mptsas_phyinfo *phy_info); -static void mptsas_send_link_status_event(struct fw_event_work *fw_event); -static struct mptsas_portinfo *mptsas_find_portinfo_by_sas_address - (MPT_ADAPTER *ioc, u64 sas_address); -static void mptsas_expander_delete(MPT_ADAPTER *ioc, - struct mptsas_portinfo *port_info, u8 force); -static void mptsas_send_expander_event(struct fw_event_work *fw_event); -static void mptsas_not_responding_devices(MPT_ADAPTER *ioc); -static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc); -static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event); -static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event); -static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id); + +static void mptsas_hotplug_work(struct work_struct *work); static void mptsas_print_phy_data(MPT_ADAPTER *ioc, MPI_SAS_IO_UNIT0_PHY_DATA *phy_data) @@ -247,115 +218,6 @@ static void mptsas_print_expander_pg1(MPT_ADAPTER *ioc, SasExpanderPage1_t *pg1) le16_to_cpu(pg1->AttachedDevHandle))); } -/* inhibit sas firmware event handling */ -static void -mptsas_fw_event_off(MPT_ADAPTER *ioc) -{ - unsigned long flags; - - spin_lock_irqsave(&ioc->fw_event_lock, flags); - ioc->fw_events_off = 1; - ioc->sas_discovery_quiesce_io = 0; - spin_unlock_irqrestore(&ioc->fw_event_lock, flags); - -} - -/* enable sas firmware event handling */ -static void -mptsas_fw_event_on(MPT_ADAPTER *ioc) -{ - unsigned long flags; - - spin_lock_irqsave(&ioc->fw_event_lock, flags); - ioc->fw_events_off = 0; - spin_unlock_irqrestore(&ioc->fw_event_lock, flags); -} - -/* queue a sas firmware event */ -static void -mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event, - unsigned long delay) -{ - unsigned long flags; - - spin_lock_irqsave(&ioc->fw_event_lock, flags); - list_add_tail(&fw_event->list, &ioc->fw_event_list); - INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work); - devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)\n", - ioc->name, __func__, fw_event)); - queue_delayed_work(ioc->fw_event_q, &fw_event->work, - delay); - spin_unlock_irqrestore(&ioc->fw_event_lock, flags); -} - -/* requeue a sas firmware event */ -static void -mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event, - unsigned long delay) -{ - unsigned long flags; - spin_lock_irqsave(&ioc->fw_event_lock, flags); - devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task " - "(fw_event=0x%p)\n", ioc->name, __func__, fw_event)); - fw_event->retries++; - queue_delayed_work(ioc->fw_event_q, &fw_event->work, - msecs_to_jiffies(delay)); - spin_unlock_irqrestore(&ioc->fw_event_lock, flags); -} - -/* free memory assoicated to a sas 
firmware event */ -static void -mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event) -{ - unsigned long flags; - - spin_lock_irqsave(&ioc->fw_event_lock, flags); - devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n", - ioc->name, __func__, fw_event)); - list_del(&fw_event->list); - kfree(fw_event); - spin_unlock_irqrestore(&ioc->fw_event_lock, flags); -} - -/* walk the firmware event queue, and either stop or wait for - * outstanding events to complete */ -static void -mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc) -{ - struct fw_event_work *fw_event, *next; - struct mptsas_target_reset_event *target_reset_list, *n; - u8 flush_q; - MPT_SCSI_HOST *hd = shost_priv(ioc->sh); - - /* flush the target_reset_list */ - if (!list_empty(&hd->target_reset_list)) { - list_for_each_entry_safe(target_reset_list, n, - &hd->target_reset_list, list) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: removing target reset for id=%d\n", - ioc->name, __func__, - target_reset_list->sas_event_data.TargetID)); - list_del(&target_reset_list->list); - kfree(target_reset_list); - } - } - - if (list_empty(&ioc->fw_event_list) || - !ioc->fw_event_q || in_interrupt()) - return; - - flush_q = 0; - list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) { - if (cancel_delayed_work(&fw_event->work)) - mptsas_free_fw_event(ioc, fw_event); - else - flush_q = 1; - } - if (flush_q) - flush_workqueue(ioc->fw_event_q); -} - - static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy) { struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); @@ -368,6 +230,20 @@ static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy) return ((MPT_SCSI_HOST *)shost->hostdata)->ioc; } +static struct mptsas_portinfo * +mptsas_get_hba_portinfo(MPT_ADAPTER *ioc) +{ + struct list_head *head = &ioc->sas_topology; + struct mptsas_portinfo *pi = NULL; + + /* always the first entry on sas_topology list */ + + if (!list_empty(head)) + pi = list_entry(head->next, struct mptsas_portinfo, list); + + return pi; +} + /* * mptsas_find_portinfo_by_handle * @@ -389,38 +265,6 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle) return rc; } -/** - * mptsas_find_portinfo_by_sas_address - - * @ioc: Pointer to MPT_ADAPTER structure - * @handle: - * - * This function should be called with the sas_topology_mutex already held - * - **/ -static struct mptsas_portinfo * -mptsas_find_portinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address) -{ - struct mptsas_portinfo *port_info, *rc = NULL; - int i; - - if (sas_address >= ioc->hba_port_sas_addr && - sas_address < (ioc->hba_port_sas_addr + - ioc->hba_port_num_phy)) - return ioc->hba_port_info; - - mutex_lock(&ioc->sas_topology_mutex); - list_for_each_entry(port_info, &ioc->sas_topology, list) - for (i = 0; i < port_info->num_phys; i++) - if (port_info->phy_info[i].identify.sas_address == - sas_address) { - rc = port_info; - goto out; - } - out: - mutex_unlock(&ioc->sas_topology_mutex); - return rc; -} - /* * Returns true if there is a scsi end device */ @@ -464,7 +308,6 @@ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_detai if(phy_info->port_details != port_details) continue; memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo)); - mptsas_set_rphy(ioc, phy_info, NULL); phy_info->port_details = NULL; } kfree(port_details); @@ -536,285 +379,6 @@ starget) phy_info->port_details->starget = starget; } -/** - * mptsas_add_device_component - - * @ioc: Pointer to MPT_ADAPTER structure - * @channel: fw mapped id's - * @id: - * 
@sas_address: - * @device_info: - * - **/ -static void -mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id, - u64 sas_address, u32 device_info, u16 slot, u64 enclosure_logical_id) -{ - struct mptsas_device_info *sas_info, *next; - struct scsi_device *sdev; - struct scsi_target *starget; - struct sas_rphy *rphy; - - /* - * Delete all matching devices out of the list - */ - mutex_lock(&ioc->sas_device_info_mutex); - list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list, - list) { - if (!sas_info->is_logical_volume && - (sas_info->sas_address == sas_address || - (sas_info->fw.channel == channel && - sas_info->fw.id == id))) { - list_del(&sas_info->list); - kfree(sas_info); - } - } - - sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL); - if (!sas_info) - goto out; - - /* - * Set Firmware mapping - */ - sas_info->fw.id = id; - sas_info->fw.channel = channel; - - sas_info->sas_address = sas_address; - sas_info->device_info = device_info; - sas_info->slot = slot; - sas_info->enclosure_logical_id = enclosure_logical_id; - INIT_LIST_HEAD(&sas_info->list); - list_add_tail(&sas_info->list, &ioc->sas_device_info_list); - - /* - * Set OS mapping - */ - shost_for_each_device(sdev, ioc->sh) { - starget = scsi_target(sdev); - rphy = dev_to_rphy(starget->dev.parent); - if (rphy->identify.sas_address == sas_address) { - sas_info->os.id = starget->id; - sas_info->os.channel = starget->channel; - } - } - - out: - mutex_unlock(&ioc->sas_device_info_mutex); - return; -} - -/** - * mptsas_add_device_component_by_fw - - * @ioc: Pointer to MPT_ADAPTER structure - * @channel: fw mapped id's - * @id: - * - **/ -static void -mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id) -{ - struct mptsas_devinfo sas_device; - struct mptsas_enclosure enclosure_info; - int rc; - - rc = mptsas_sas_device_pg0(ioc, &sas_device, - (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID << - MPI_SAS_DEVICE_PGAD_FORM_SHIFT), - (channel << 8) + id); - if (rc) - return; - - memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure)); - mptsas_sas_enclosure_pg0(ioc, &enclosure_info, - (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE << - MPI_SAS_ENCLOS_PGAD_FORM_SHIFT), - sas_device.handle_enclosure); - - mptsas_add_device_component(ioc, sas_device.channel, - sas_device.id, sas_device.sas_address, sas_device.device_info, - sas_device.slot, enclosure_info.enclosure_logical_id); -} - -/** - * mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to list - * @ioc: Pointer to MPT_ADAPTER structure - * @channel: fw mapped id's - * @id: - * - **/ -static void -mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc, - struct scsi_target *starget) -{ - CONFIGPARMS cfg; - ConfigPageHeader_t hdr; - dma_addr_t dma_handle; - pRaidVolumePage0_t buffer = NULL; - int i; - RaidPhysDiskPage0_t phys_disk; - struct mptsas_device_info *sas_info, *next; - - memset(&cfg, 0 , sizeof(CONFIGPARMS)); - memset(&hdr, 0 , sizeof(ConfigPageHeader_t)); - hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME; - /* assumption that all volumes on channel = 0 */ - cfg.pageAddr = starget->id; - cfg.cfghdr.hdr = &hdr; - cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER; - cfg.timeout = 10; - - if (mpt_config(ioc, &cfg) != 0) - goto out; - - if (!hdr.PageLength) - goto out; - - buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, - &dma_handle); - - if (!buffer) - goto out; - - cfg.physAddr = dma_handle; - cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; - - if (mpt_config(ioc, &cfg) != 0) - goto out; - - 
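The helper being deleted above uses the fusion driver's standard two-pass config-page protocol: a PAGE_HEADER action to learn the page length, then a DMA-coherent buffer and a READ_CURRENT action to pull the live contents. A condensed sketch of that sequence, assuming only the mpt_config()/CONFIGPARMS types already visible in this diff:

    /* Sketch: read RAID volume page 0 for @volume_id (two-pass protocol). */
    static int demo_read_raid_volume_pg0(MPT_ADAPTER *ioc, u8 volume_id)
    {
            CONFIGPARMS cfg;
            ConfigPageHeader_t hdr;
            pRaidVolumePage0_t buffer;
            dma_addr_t dma_handle;
            int rc = -ENODEV;

            memset(&cfg, 0, sizeof(cfg));
            memset(&hdr, 0, sizeof(hdr));
            hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
            cfg.pageAddr = volume_id;       /* volumes assumed on channel 0 */
            cfg.cfghdr.hdr = &hdr;
            cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
            cfg.timeout = 10;

            /* Pass 1: header only, to learn PageLength (32-bit words). */
            if (mpt_config(ioc, &cfg) != 0 || !hdr.PageLength)
                    return rc;

            buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
                                          &dma_handle);
            if (!buffer)
                    return -ENOMEM;

            /* Pass 2: fetch the current page into the DMA buffer. */
            cfg.physAddr = dma_handle;
            cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
            if (mpt_config(ioc, &cfg) == 0)
                    rc = buffer->NumPhysDisks;  /* caller walks PhysDisk[] */

            pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
                                dma_handle);
            return rc;
    }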
if (!buffer->NumPhysDisks) - goto out; - - /* - * Adding entry for hidden components - */ - for (i = 0; i < buffer->NumPhysDisks; i++) { - - if (mpt_raid_phys_disk_pg0(ioc, - buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0) - continue; - - mptsas_add_device_component_by_fw(ioc, phys_disk.PhysDiskBus, - phys_disk.PhysDiskID); - - mutex_lock(&ioc->sas_device_info_mutex); - list_for_each_entry(sas_info, &ioc->sas_device_info_list, - list) { - if (!sas_info->is_logical_volume && - (sas_info->fw.channel == phys_disk.PhysDiskBus && - sas_info->fw.id == phys_disk.PhysDiskID)) { - sas_info->is_hidden_raid_component = 1; - sas_info->volume_id = starget->id; - } - } - mutex_unlock(&ioc->sas_device_info_mutex); - - } - - /* - * Delete all matching devices out of the list - */ - mutex_lock(&ioc->sas_device_info_mutex); - list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list, - list) { - if (sas_info->is_logical_volume && sas_info->fw.id == - starget->id) { - list_del(&sas_info->list); - kfree(sas_info); - } - } - - sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL); - if (sas_info) { - sas_info->fw.id = starget->id; - sas_info->os.id = starget->id; - sas_info->os.channel = starget->channel; - sas_info->is_logical_volume = 1; - INIT_LIST_HEAD(&sas_info->list); - list_add_tail(&sas_info->list, &ioc->sas_device_info_list); - } - mutex_unlock(&ioc->sas_device_info_mutex); - - out: - if (buffer) - pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer, - dma_handle); -} - -/** - * mptsas_add_device_component_starget - - * @ioc: Pointer to MPT_ADAPTER structure - * @starget: - * - **/ -static void -mptsas_add_device_component_starget(MPT_ADAPTER *ioc, - struct scsi_target *starget) -{ - VirtTarget *vtarget; - struct sas_rphy *rphy; - struct mptsas_phyinfo *phy_info = NULL; - struct mptsas_enclosure enclosure_info; - - rphy = dev_to_rphy(starget->dev.parent); - vtarget = starget->hostdata; - phy_info = mptsas_find_phyinfo_by_sas_address(ioc, - rphy->identify.sas_address); - if (!phy_info) - return; - - memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure)); - mptsas_sas_enclosure_pg0(ioc, &enclosure_info, - (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE << - MPI_SAS_ENCLOS_PGAD_FORM_SHIFT), - phy_info->attached.handle_enclosure); - - mptsas_add_device_component(ioc, phy_info->attached.channel, - phy_info->attached.id, phy_info->attached.sas_address, - phy_info->attached.device_info, - phy_info->attached.slot, enclosure_info.enclosure_logical_id); -} - -/** - * mptsas_del_device_component_by_os - Once a device has been removed, we mark the entry in the list as being cached - * @ioc: Pointer to MPT_ADAPTER structure - * @channel: os mapped id's - * @id: - * - **/ -static void -mptsas_del_device_component_by_os(MPT_ADAPTER *ioc, u8 channel, u8 id) -{ - struct mptsas_device_info *sas_info, *next; - - /* - * Set is_cached flag - */ - list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list, - list) { - if (sas_info->os.channel == channel && sas_info->os.id == id) - sas_info->is_cached = 1; - } -} - -/** - * mptsas_del_device_components - Cleaning the list - * @ioc: Pointer to MPT_ADAPTER structure - * - **/ -static void -mptsas_del_device_components(MPT_ADAPTER *ioc) -{ - struct mptsas_device_info *sas_info, *next; - - mutex_lock(&ioc->sas_device_info_mutex); - list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list, - list) { - list_del(&sas_info->list); - kfree(sas_info); - } - mutex_unlock(&ioc->sas_device_info_mutex); -} - /* * mptsas_setup_wide_ports @@ 
-870,8 +434,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info) * Forming a port */ if (!port_details) { - port_details = kzalloc(sizeof(struct - mptsas_portinfo_details), GFP_KERNEL); + port_details = kzalloc(sizeof(*port_details), + GFP_KERNEL); if (!port_details) goto out; port_details->num_phys = 1; @@ -959,62 +523,15 @@ mptsas_find_vtarget(MPT_ADAPTER *ioc, u8 channel, u8 id) VirtTarget *vtarget = NULL; shost_for_each_device(sdev, ioc->sh) { - vdevice = sdev->hostdata; - if ((vdevice == NULL) || - (vdevice->vtarget == NULL)) - continue; - if ((vdevice->vtarget->tflags & - MPT_TARGET_FLAGS_RAID_COMPONENT || - vdevice->vtarget->raidVolume)) + if ((vdevice = sdev->hostdata) == NULL) continue; if (vdevice->vtarget->id == id && - vdevice->vtarget->channel == channel) + vdevice->vtarget->channel == channel) vtarget = vdevice->vtarget; } return vtarget; } -static void -mptsas_queue_device_delete(MPT_ADAPTER *ioc, - MpiEventDataSasDeviceStatusChange_t *sas_event_data) -{ - struct fw_event_work *fw_event; - int sz; - - sz = offsetof(struct fw_event_work, event_data) + - sizeof(MpiEventDataSasDeviceStatusChange_t); - fw_event = kzalloc(sz, GFP_ATOMIC); - if (!fw_event) { - printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", - ioc->name, __func__, __LINE__); - return; - } - memcpy(fw_event->event_data, sas_event_data, - sizeof(MpiEventDataSasDeviceStatusChange_t)); - fw_event->event = MPI_EVENT_SAS_DEVICE_STATUS_CHANGE; - fw_event->ioc = ioc; - mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1)); -} - -static void -mptsas_queue_rescan(MPT_ADAPTER *ioc) -{ - struct fw_event_work *fw_event; - int sz; - - sz = offsetof(struct fw_event_work, event_data); - fw_event = kzalloc(sz, GFP_ATOMIC); - if (!fw_event) { - printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", - ioc->name, __func__, __LINE__); - return; - } - fw_event->event = -1; - fw_event->ioc = ioc; - mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1)); -} - - /** * mptsas_target_reset * @@ -1033,21 +550,13 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id) { MPT_FRAME_HDR *mf; SCSITaskMgmt_t *pScsiTm; - if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) - return 0; - - mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc); - if (mf == NULL) { - dfailprintk(ioc, printk(MYIOC_s_WARN_FMT - "%s, no msg frames @%d!!\n", ioc->name, - __func__, __LINE__)); - goto out_fail; + if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) { + dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames @%d!!\n", + ioc->name,__func__, __LINE__)); + return 0; } - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n", - ioc->name, mf)); - /* Format the Request */ pScsiTm = (SCSITaskMgmt_t *) mf; @@ -1060,18 +569,9 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id) DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf); - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "TaskMgmt type=%d (sas device delete) fw_channel = %d fw_id = %d)\n", - ioc->name, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, channel, id)); - - mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf); + mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf); return 1; - - out_fail: - - mpt_clear_taskmgmt_in_progress_flag(ioc); - return 0; } /** @@ -1102,12 +602,11 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc, vtarget->deleted = 1; /* block IO */ - target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event), + target_reset_list = kzalloc(sizeof(*target_reset_list), GFP_ATOMIC); if (!target_reset_list) { - dfailprintk(ioc, printk(MYIOC_s_WARN_FMT - 
"%s, failed to allocate mem @%d..!!\n", - ioc->name, __func__, __LINE__)); + dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n", + ioc->name,__func__, __LINE__)); return; } @@ -1115,101 +614,84 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc, sizeof(*sas_event_data)); list_add_tail(&target_reset_list->list, &hd->target_reset_list); - target_reset_list->time_count = jiffies; + if (hd->resetPending) + return; if (mptsas_target_reset(ioc, channel, id)) { target_reset_list->target_reset_issued = 1; + hd->resetPending = 1; } } /** - * mptsas_taskmgmt_complete - complete SAS task management function - * @ioc: Pointer to MPT_ADAPTER structure + * mptsas_dev_reset_complete + * + * Completion for TARGET_RESET after NOT_RESPONDING_EVENT, + * enable work queue to finish off removing device from upper layers. + * then send next TARGET_RESET in the queue. + * + * @ioc * - * Completion for TARGET_RESET after NOT_RESPONDING_EVENT, enable work - * queue to finish off removing device from upper layers. then send next - * TARGET_RESET in the queue. **/ -static int -mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) +static void +mptsas_dev_reset_complete(MPT_ADAPTER *ioc) { MPT_SCSI_HOST *hd = shost_priv(ioc->sh); struct list_head *head = &hd->target_reset_list; + struct mptsas_target_reset_event *target_reset_list; + struct mptsas_hotplug_event *ev; + EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data; u8 id, channel; - struct mptsas_target_reset_event *target_reset_list; - SCSITaskMgmtReply_t *pScsiTmReply; - - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed: " - "(mf = %p, mr = %p)\n", ioc->name, mf, mr)); - - pScsiTmReply = (SCSITaskMgmtReply_t *)mr; - if (pScsiTmReply) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "\tTaskMgmt completed: fw_channel = %d, fw_id = %d,\n" - "\ttask_type = 0x%02X, iocstatus = 0x%04X " - "loginfo = 0x%08X,\n\tresponse_code = 0x%02X, " - "term_cmnds = %d\n", ioc->name, - pScsiTmReply->Bus, pScsiTmReply->TargetID, - pScsiTmReply->TaskType, - le16_to_cpu(pScsiTmReply->IOCStatus), - le32_to_cpu(pScsiTmReply->IOCLogInfo), - pScsiTmReply->ResponseCode, - le32_to_cpu(pScsiTmReply->TerminationCount))); - - if (pScsiTmReply->ResponseCode) - mptscsih_taskmgmt_response_code(ioc, - pScsiTmReply->ResponseCode); - } - - if (pScsiTmReply && (pScsiTmReply->TaskType == - MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK || pScsiTmReply->TaskType == - MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) { - ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; - ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID; - memcpy(ioc->taskmgmt_cmds.reply, mr, - min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength)); - if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) { - ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING; - complete(&ioc->taskmgmt_cmds.done); - return 1; - } - return 0; - } - - mpt_clear_taskmgmt_in_progress_flag(ioc); + __le64 sas_address; if (list_empty(head)) - return 1; - - target_reset_list = list_entry(head->next, - struct mptsas_target_reset_event, list); + return; - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "TaskMgmt: completed (%d seconds)\n", - ioc->name, jiffies_to_msecs(jiffies - - target_reset_list->time_count)/1000)); + target_reset_list = list_entry(head->next, struct mptsas_target_reset_event, list); - id = pScsiTmReply->TargetID; - channel = pScsiTmReply->Bus; - target_reset_list->time_count = jiffies; + sas_event_data = &target_reset_list->sas_event_data; + id = sas_event_data->TargetID; + channel = 
sas_event_data->Bus; + hd->resetPending = 0; /* * retry target reset */ if (!target_reset_list->target_reset_issued) { - if (mptsas_target_reset(ioc, channel, id)) + if (mptsas_target_reset(ioc, channel, id)) { target_reset_list->target_reset_issued = 1; - return 1; + hd->resetPending = 1; + } + return; } /* * enable work queue to remove device from upper layers */ list_del(&target_reset_list->list); - if ((mptsas_find_vtarget(ioc, channel, id)) && !ioc->fw_events_off) - mptsas_queue_device_delete(ioc, - &target_reset_list->sas_event_data); + ev = kzalloc(sizeof(*ev), GFP_ATOMIC); + if (!ev) { + dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n", + ioc->name,__func__, __LINE__)); + return; + } + + INIT_WORK(&ev->work, mptsas_hotplug_work); + ev->ioc = ioc; + ev->handle = le16_to_cpu(sas_event_data->DevHandle); + ev->parent_handle = + le16_to_cpu(sas_event_data->ParentDevHandle); + ev->channel = channel; + ev->id =id; + ev->phy_id = sas_event_data->PhyNum; + memcpy(&sas_address, &sas_event_data->SASAddress, + sizeof(__le64)); + ev->sas_address = le64_to_cpu(sas_address); + ev->device_info = le32_to_cpu(sas_event_data->DeviceInfo); + ev->event_type = MPTSAS_DEL_DEVICE; + schedule_work(&ev->work); + kfree(target_reset_list); /* * issue target reset to next device in the queue @@ -1217,19 +699,34 @@ mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) head = &hd->target_reset_list; if (list_empty(head)) - return 1; + return; target_reset_list = list_entry(head->next, struct mptsas_target_reset_event, list); - id = target_reset_list->sas_event_data.TargetID; - channel = target_reset_list->sas_event_data.Bus; - target_reset_list->time_count = jiffies; + sas_event_data = &target_reset_list->sas_event_data; + id = sas_event_data->TargetID; + channel = sas_event_data->Bus; - if (mptsas_target_reset(ioc, channel, id)) + if (mptsas_target_reset(ioc, channel, id)) { target_reset_list->target_reset_issued = 1; + hd->resetPending = 1; + } +} - return 1; +/** + * mptsas_taskmgmt_complete + * + * @ioc + * @mf + * @mr + * + **/ +static int +mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) +{ + mptsas_dev_reset_complete(ioc); + return mptscsih_taskmgmt_complete(ioc, mf, mr); } /** @@ -1243,59 +740,37 @@ static int mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) { MPT_SCSI_HOST *hd; + struct mptsas_target_reset_event *target_reset_list, *n; int rc; rc = mptscsih_ioc_reset(ioc, reset_phase); - if ((ioc->bus_type != SAS) || (!rc)) - return rc; + if (ioc->bus_type != SAS) + goto out; + + if (reset_phase != MPT_IOC_POST_RESET) + goto out; + + if (!ioc->sh || !ioc->sh->hostdata) + goto out; hd = shost_priv(ioc->sh); if (!hd->ioc) goto out; - switch (reset_phase) { - case MPT_IOC_SETUP_RESET: - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__)); - mptsas_fw_event_off(ioc); - break; - case MPT_IOC_PRE_RESET: - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__)); - break; - case MPT_IOC_POST_RESET: - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__)); - if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) { - ioc->sas_mgmt.status |= MPT_MGMT_STATUS_DID_IOCRESET; - complete(&ioc->sas_mgmt.done); - } - mptsas_cleanup_fw_event_q(ioc); - mptsas_queue_rescan(ioc); - mptsas_fw_event_on(ioc); - break; - default: - break; + if (list_empty(&hd->target_reset_list)) + goto out; + + /* flush the target_reset_list */ + 
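These hunks restore a strictly serialized reset queue: mptsas_target_reset_queue() appends an event and fires a TARGET_RESET only when nothing is pending, and mptsas_dev_reset_complete() retires the head entry, hands the removal to the hotplug worker, and launches the next reset. A condensed sketch of the one-in-flight pattern (it omits the retry of a head entry whose reset never got issued); the demo_* names are hypothetical:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_reset_event {
            struct list_head list;
            u8 channel, id;
            u8 issued;                      /* like target_reset_issued */
    };

    /* Would wrap mpt_put_msg_frame_hi_pri(); returns 1 if the reset left. */
    static int demo_fire_reset(u8 channel, u8 id);

    /* Queue a reset; start it only if none is already in flight. */
    static void demo_reset_queue(struct list_head *q, bool *pending,
                                 struct demo_reset_event *ev)
    {
            list_add_tail(&ev->list, q);
            if (*pending)
                    return;                 /* completion path fires it later */
            if (demo_fire_reset(ev->channel, ev->id)) {
                    ev->issued = 1;
                    *pending = true;
            }
    }

    /* Completion path: retire the head, then start the next in line. */
    static void demo_reset_complete(struct list_head *q, bool *pending)
    {
            struct demo_reset_event *ev;

            *pending = false;
            if (list_empty(q))
                    return;
            ev = list_first_entry(q, struct demo_reset_event, list);
            list_del(&ev->list);
            kfree(ev);                      /* real code schedules hotplug work */

            if (list_empty(q))
                    return;
            ev = list_first_entry(q, struct demo_reset_event, list);
            if (demo_fire_reset(ev->channel, ev->id)) {
                    ev->issued = 1;
                    *pending = true;
            }
    }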
list_for_each_entry_safe(target_reset_list, n, + &hd->target_reset_list, list) { + list_del(&target_reset_list->list); + kfree(target_reset_list); } out: return rc; } - -/** - * enum device_state - - * @DEVICE_RETRY: need to retry the TUR - * @DEVICE_ERROR: TUR return error, don't add device - * @DEVICE_READY: device can be added - * - */ -enum device_state{ - DEVICE_RETRY, - DEVICE_ERROR, - DEVICE_READY, -}; - static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure, u32 form, u32 form_specific) @@ -1361,308 +836,15 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure, return error; } -/** - * mptsas_add_end_device - report a new end device to sas transport layer - * @ioc: Pointer to MPT_ADAPTER structure - * @phy_info: decribes attached device - * - * return (0) success (1) failure - * - **/ -static int -mptsas_add_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info) -{ - struct sas_rphy *rphy; - struct sas_port *port; - struct sas_identify identify; - char *ds = NULL; - u8 fw_id; - - if (!phy_info) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: exit at line=%d\n", ioc->name, - __func__, __LINE__)); - return 1; - } - - fw_id = phy_info->attached.id; - - if (mptsas_get_rphy(phy_info)) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: fw_id=%d exit at line=%d\n", ioc->name, - __func__, fw_id, __LINE__)); - return 2; - } - - port = mptsas_get_port(phy_info); - if (!port) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: fw_id=%d exit at line=%d\n", ioc->name, - __func__, fw_id, __LINE__)); - return 3; - } - - if (phy_info->attached.device_info & - MPI_SAS_DEVICE_INFO_SSP_TARGET) - ds = "ssp"; - if (phy_info->attached.device_info & - MPI_SAS_DEVICE_INFO_STP_TARGET) - ds = "stp"; - if (phy_info->attached.device_info & - MPI_SAS_DEVICE_INFO_SATA_DEVICE) - ds = "sata"; - - printk(MYIOC_s_INFO_FMT "attaching %s device: fw_channel %d, fw_id %d," - " phy %d, sas_addr 0x%llx\n", ioc->name, ds, - phy_info->attached.channel, phy_info->attached.id, - phy_info->attached.phy_id, (unsigned long long) - phy_info->attached.sas_address); - - mptsas_parse_device_info(&identify, &phy_info->attached); - rphy = sas_end_device_alloc(port); - if (!rphy) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: fw_id=%d exit at line=%d\n", ioc->name, - __func__, fw_id, __LINE__)); - return 5; /* non-fatal: an rphy can be added later */ - } - - rphy->identify = identify; - if (sas_rphy_add(rphy)) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: fw_id=%d exit at line=%d\n", ioc->name, - __func__, fw_id, __LINE__)); - sas_rphy_free(rphy); - return 6; - } - mptsas_set_rphy(ioc, phy_info, rphy); - return 0; -} - -/** - * mptsas_del_end_device - report a deleted end device to sas transport layer - * @ioc: Pointer to MPT_ADAPTER structure - * @phy_info: decribes attached device - * - **/ -static void -mptsas_del_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info) -{ - struct sas_rphy *rphy; - struct sas_port *port; - struct mptsas_portinfo *port_info; - struct mptsas_phyinfo *phy_info_parent; - int i; - char *ds = NULL; - u8 fw_id; - u64 sas_address; - - if (!phy_info) - return; - - fw_id = phy_info->attached.id; - sas_address = phy_info->attached.sas_address; - - if (!phy_info->port_details) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: fw_id=%d exit at line=%d\n", ioc->name, - __func__, fw_id, __LINE__)); - return; - } - rphy = mptsas_get_rphy(phy_info); - if (!rphy) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: fw_id=%d exit at 
line=%d\n", ioc->name, - __func__, fw_id, __LINE__)); - return; - } - - if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_INITIATOR - || phy_info->attached.device_info - & MPI_SAS_DEVICE_INFO_SMP_INITIATOR - || phy_info->attached.device_info - & MPI_SAS_DEVICE_INFO_STP_INITIATOR) - ds = "initiator"; - if (phy_info->attached.device_info & - MPI_SAS_DEVICE_INFO_SSP_TARGET) - ds = "ssp"; - if (phy_info->attached.device_info & - MPI_SAS_DEVICE_INFO_STP_TARGET) - ds = "stp"; - if (phy_info->attached.device_info & - MPI_SAS_DEVICE_INFO_SATA_DEVICE) - ds = "sata"; - - dev_printk(KERN_DEBUG, &rphy->dev, MYIOC_s_FMT - "removing %s device: fw_channel %d, fw_id %d, phy %d," - "sas_addr 0x%llx\n", ioc->name, ds, phy_info->attached.channel, - phy_info->attached.id, phy_info->attached.phy_id, - (unsigned long long) sas_address); - - port = mptsas_get_port(phy_info); - if (!port) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "%s: fw_id=%d exit at line=%d\n", ioc->name, - __func__, fw_id, __LINE__)); - return; - } - port_info = phy_info->portinfo; - phy_info_parent = port_info->phy_info; - for (i = 0; i < port_info->num_phys; i++, phy_info_parent++) { - if (!phy_info_parent->phy) - continue; - if (phy_info_parent->attached.sas_address != - sas_address) - continue; - dev_printk(KERN_DEBUG, &phy_info_parent->phy->dev, - MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n", - ioc->name, phy_info_parent->phy_id, - phy_info_parent->phy); - sas_port_delete_phy(port, phy_info_parent->phy); - } - - dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT - "delete port %d, sas_addr (0x%llx)\n", ioc->name, - port->port_identifier, (unsigned long long)sas_address); - sas_port_delete(port); - mptsas_set_port(ioc, phy_info, NULL); - mptsas_port_delete(ioc, phy_info->port_details); -} - -struct mptsas_phyinfo * -mptsas_refreshing_device_handles(MPT_ADAPTER *ioc, - struct mptsas_devinfo *sas_device) -{ - struct mptsas_phyinfo *phy_info; - struct mptsas_portinfo *port_info; - int i; - - phy_info = mptsas_find_phyinfo_by_sas_address(ioc, - sas_device->sas_address); - if (!phy_info) - goto out; - port_info = phy_info->portinfo; - if (!port_info) - goto out; - mutex_lock(&ioc->sas_topology_mutex); - for (i = 0; i < port_info->num_phys; i++) { - if (port_info->phy_info[i].attached.sas_address != - sas_device->sas_address) - continue; - port_info->phy_info[i].attached.channel = sas_device->channel; - port_info->phy_info[i].attached.id = sas_device->id; - port_info->phy_info[i].attached.sas_address = - sas_device->sas_address; - port_info->phy_info[i].attached.handle = sas_device->handle; - port_info->phy_info[i].attached.handle_parent = - sas_device->handle_parent; - port_info->phy_info[i].attached.handle_enclosure = - sas_device->handle_enclosure; - } - mutex_unlock(&ioc->sas_topology_mutex); - out: - return phy_info; -} - -/** - * mptsas_firmware_event_work - work thread for processing fw events - * @work: work queue payload containing info describing the event - * Context: user - * - */ -static void -mptsas_firmware_event_work(struct work_struct *work) -{ - struct fw_event_work *fw_event = - container_of(work, struct fw_event_work, work.work); - MPT_ADAPTER *ioc = fw_event->ioc; - - /* special rescan topology handling */ - if (fw_event->event == -1) { - if (ioc->in_rescan) { - devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: rescan ignored as it is in progress\n", - ioc->name, __func__)); - return; - } - devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: rescan after " - "reset\n", ioc->name, __func__)); - ioc->in_rescan = 1; - 
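The worker being removed here treats event == -1 as a sentinel that mptsas_queue_rescan() posts from the MPT_IOC_POST_RESET phase: the handler re-walks the topology exactly once, with ioc->in_rescan guarding against overlapping rescans. A stripped-down sketch of that sentinel dispatch, with hypothetical demo_* names:

    #include <linux/jiffies.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct demo_fw_event {
            struct delayed_work work;
            int event;                      /* -1 means "rescan topology" */
    };

    static bool demo_in_rescan;

    static void demo_fw_event_work(struct work_struct *work)
    {
            struct demo_fw_event *ev =
                    container_of(work, struct demo_fw_event, work.work);

            if (ev->event == -1) {          /* sentinel queued after reset */
                    if (!demo_in_rescan) {
                            demo_in_rescan = true;
                            /* ...prune dead devices, rescan topology... */
                            demo_in_rescan = false;
                    }
                    kfree(ev);
                    return;
            }
            /* ...dispatch real MPI_EVENT_* codes here... */
            kfree(ev);
    }

    /* Posted from the reset path with a tiny delay, as the driver does. */
    static void demo_queue_rescan(struct workqueue_struct *q)
    {
            struct demo_fw_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

            if (!ev)
                    return;
            ev->event = -1;
            INIT_DELAYED_WORK(&ev->work, demo_fw_event_work);
            queue_delayed_work(q, &ev->work, msecs_to_jiffies(1));
    }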
mptsas_not_responding_devices(ioc); - mptsas_scan_sas_topology(ioc); - ioc->in_rescan = 0; - mptsas_free_fw_event(ioc, fw_event); - return; - } - - /* events handling turned off during host reset */ - if (ioc->fw_events_off) { - mptsas_free_fw_event(ioc, fw_event); - return; - } - - devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: fw_event=(0x%p), " - "event = (0x%02x)\n", ioc->name, __func__, fw_event, - (fw_event->event & 0xFF))); - - switch (fw_event->event) { - case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: - mptsas_send_sas_event(fw_event); - break; - case MPI_EVENT_INTEGRATED_RAID: - mptsas_send_raid_event(fw_event); - break; - case MPI_EVENT_IR2: - mptsas_send_ir2_event(fw_event); - break; - case MPI_EVENT_PERSISTENT_TABLE_FULL: - mptbase_sas_persist_operation(ioc, - MPI_SAS_OP_CLEAR_NOT_PRESENT); - mptsas_free_fw_event(ioc, fw_event); - break; - case MPI_EVENT_SAS_BROADCAST_PRIMITIVE: - mptsas_broadcast_primative_work(fw_event); - break; - case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE: - mptsas_send_expander_event(fw_event); - break; - case MPI_EVENT_SAS_PHY_LINK_STATUS: - mptsas_send_link_status_event(fw_event); - break; - case MPI_EVENT_QUEUE_FULL: - mptsas_handle_queue_full_event(fw_event); - break; - } -} - - - static int mptsas_slave_configure(struct scsi_device *sdev) { - struct Scsi_Host *host = sdev->host; - MPT_SCSI_HOST *hd = shost_priv(host); - MPT_ADAPTER *ioc = hd->ioc; - VirtDevice *vdevice = sdev->hostdata; - - if (vdevice->vtarget->deleted) { - sdev_printk(KERN_INFO, sdev, "clearing deleted flag\n"); - vdevice->vtarget->deleted = 0; - } - /* - * RAID volumes placed beyond the last expected port. - * Ignore sending sas mode pages in that case.. - */ - if (sdev->channel == MPTSAS_RAID_CHANNEL) { - mptsas_add_device_component_starget_ir(ioc, scsi_target(sdev)); + if (sdev->channel == MPTSAS_RAID_CHANNEL) goto out; - } sas_read_port_mode_page(sdev); - mptsas_add_device_component_starget(ioc, scsi_target(sdev)); - out: return mptscsih_slave_configure(sdev); } @@ -1693,18 +875,9 @@ mptsas_target_alloc(struct scsi_target *starget) * RAID volumes placed beyond the last expected port. 
*/ if (starget->channel == MPTSAS_RAID_CHANNEL) { - if (!ioc->raid_data.pIocPg2) { - kfree(vtarget); - return -ENXIO; - } - for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) { - if (id == ioc->raid_data.pIocPg2-> - RaidVolume[i].VolumeID) { - channel = ioc->raid_data.pIocPg2-> - RaidVolume[i].VolumeBus; - } - } - vtarget->raidVolume = 1; + for (i=0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) + if (id == ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID) + channel = ioc->raid_data.pIocPg2->RaidVolume[i].VolumeBus; goto out; } @@ -1753,18 +926,11 @@ mptsas_target_destroy(struct scsi_target *starget) struct sas_rphy *rphy; struct mptsas_portinfo *p; int i; - MPT_ADAPTER *ioc = hd->ioc; - VirtTarget *vtarget; + MPT_ADAPTER *ioc = hd->ioc; if (!starget->hostdata) return; - vtarget = starget->hostdata; - - mptsas_del_device_component_by_os(ioc, starget->channel, - starget->id); - - if (starget->channel == MPTSAS_RAID_CHANNEL) goto out; @@ -1774,21 +940,12 @@ mptsas_target_destroy(struct scsi_target *starget) if (p->phy_info[i].attached.sas_address != rphy->identify.sas_address) continue; - - starget_printk(KERN_INFO, starget, MYIOC_s_FMT - "delete device: fw_channel %d, fw_id %d, phy %d, " - "sas_addr 0x%llx\n", ioc->name, - p->phy_info[i].attached.channel, - p->phy_info[i].attached.id, - p->phy_info[i].attached.phy_id, (unsigned long long) - p->phy_info[i].attached.sas_address); - mptsas_set_starget(&p->phy_info[i], NULL); + goto out; } } out: - vtarget->starget = NULL; kfree(starget->hostdata); starget->hostdata = NULL; } @@ -1851,8 +1008,6 @@ mptsas_slave_alloc(struct scsi_device *sdev) static int mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { - MPT_SCSI_HOST *hd; - MPT_ADAPTER *ioc; VirtDevice *vdevice = SCpnt->device->hostdata; if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) { @@ -1861,12 +1016,6 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) return 0; } - hd = shost_priv(SCpnt->device->host); - ioc = hd->ioc; - - if (ioc->sas_discovery_quiesce_io) - return SCSI_MLQUEUE_HOST_BUSY; - // scsi_print_command(SCpnt); return mptscsih_qcmd(SCpnt,done); @@ -1965,19 +1114,14 @@ static int mptsas_get_linkerrors(struct sas_phy *phy) static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) { - ioc->sas_mgmt.status |= MPT_MGMT_STATUS_COMMAND_GOOD; + ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_COMMAND_GOOD; if (reply != NULL) { - ioc->sas_mgmt.status |= MPT_MGMT_STATUS_RF_VALID; + ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_RF_VALID; memcpy(ioc->sas_mgmt.reply, reply, min(ioc->reply_sz, 4 * reply->u.reply.MsgLength)); } - - if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) { - ioc->sas_mgmt.status &= ~MPT_MGMT_STATUS_PENDING; - complete(&ioc->sas_mgmt.done); - return 1; - } - return 0; + complete(&ioc->sas_mgmt.done); + return 1; } static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset) @@ -2016,7 +1160,6 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset) MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET; req->PhyNum = phy->identify.phy_identifier; - INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status) mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, @@ -2031,7 +1174,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset) /* a reply frame is expected */ if ((ioc->sas_mgmt.status & - MPT_MGMT_STATUS_RF_VALID) == 0) { + MPT_IOCTL_STATUS_RF_VALID) == 0) { error = -ENXIO; goto out_unlock; } @@ -2048,7 
+1191,6 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset) error = 0; out_unlock: - CLEAR_MGMT_STATUS(ioc->sas_mgmt.status) mutex_unlock(&ioc->sas_mgmt.mutex); out: return error; @@ -2162,7 +1304,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, struct mptsas_portinfo *port_info; mutex_lock(&ioc->sas_topology_mutex); - port_info = ioc->hba_port_info; + port_info = mptsas_get_hba_portinfo(ioc); if (port_info && port_info->phy_info) sas_address = port_info->phy_info[0].phy->identify.sas_address; @@ -2177,32 +1319,26 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, /* request */ flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | - MPI_SGE_FLAGS_DIRECTION) - << MPI_SGE_FLAGS_SHIFT; + MPI_SGE_FLAGS_DIRECTION | + mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT; flagsLength |= (blk_rq_bytes(req) - 4); dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); if (!dma_addr_out) goto put_mf; - ioc->add_sge(psge, flagsLength, dma_addr_out); - psge += ioc->SGE_size; + mpt_add_sge(psge, flagsLength, dma_addr_out); + psge += (sizeof(u32) + sizeof(dma_addr_t)); /* response */ - flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT | - MPI_SGE_FLAGS_SYSTEM_ADDRESS | - MPI_SGE_FLAGS_IOC_TO_HOST | - MPI_SGE_FLAGS_END_OF_BUFFER; - - flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT; + flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; flagsLength |= blk_rq_bytes(rsp) + 4; dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); if (!dma_addr_in) goto unmap; - ioc->add_sge(psge, flagsLength, dma_addr_in); + mpt_add_sge(psge, flagsLength, dma_addr_in); - INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status) mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf); timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ); @@ -2215,7 +1351,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, } mf = NULL; - if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) { + if (ioc->sas_mgmt.status & MPT_IOCTL_STATUS_RF_VALID) { SmpPassthroughReply_t *smprep; smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; @@ -2224,8 +1360,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, req->resid_len = 0; rsp->resid_len -= smprep->ResponseDataLength; } else { - printk(MYIOC_s_ERR_FMT - "%s: smp passthru reply failed to be returned\n", + printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n", ioc->name, __func__); ret = -ENXIO; } @@ -2240,7 +1375,6 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, if (mf) mpt_free_msg_frame(ioc, mf); out_unlock: - CLEAR_MGMT_STATUS(ioc->sas_mgmt.status) mutex_unlock(&ioc->sas_mgmt.mutex); out: return ret; @@ -2304,7 +1438,7 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info) port_info->num_phys = buffer->NumPhys; port_info->phy_info = kcalloc(port_info->num_phys, - sizeof(struct mptsas_phyinfo), GFP_KERNEL); + sizeof(*port_info->phy_info),GFP_KERNEL); if (!port_info->phy_info) { error = -ENOMEM; goto out_free_consistent; @@ -2466,6 +1600,10 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info, __le64 sas_address; int error=0; + if (ioc->sas_discovery_runtime && + mptsas_is_end_device(device_info)) + goto out; + hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION; hdr.ExtPageLength = 0; hdr.PageNumber = 0; @@ -2506,7 +1644,6 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct 
mptsas_devinfo *device_info, mptsas_print_device_pg0(ioc, buffer); - memset(device_info, 0, sizeof(struct mptsas_devinfo)); device_info->handle = le16_to_cpu(buffer->DevHandle); device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle); device_info->handle_enclosure = @@ -2538,9 +1675,7 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info, SasExpanderPage0_t *buffer; dma_addr_t dma_handle; int i, error; - __le64 sas_address; - memset(port_info, 0, sizeof(struct mptsas_portinfo)); hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION; hdr.ExtPageLength = 0; hdr.PageNumber = 0; @@ -2586,23 +1721,18 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info, } /* save config data */ - port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1; + port_info->num_phys = buffer->NumPhys; port_info->phy_info = kcalloc(port_info->num_phys, - sizeof(struct mptsas_phyinfo), GFP_KERNEL); + sizeof(*port_info->phy_info),GFP_KERNEL); if (!port_info->phy_info) { error = -ENOMEM; goto out_free_consistent; } - memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64)); for (i = 0; i < port_info->num_phys; i++) { port_info->phy_info[i].portinfo = port_info; port_info->phy_info[i].handle = le16_to_cpu(buffer->DevHandle); - port_info->phy_info[i].identify.sas_address = - le64_to_cpu(sas_address); - port_info->phy_info[i].identify.handle_parent = - le16_to_cpu(buffer->ParentDevHandle); } out_free_consistent: @@ -2622,7 +1752,11 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, dma_addr_t dma_handle; int error=0; - hdr.PageVersion = MPI_SASEXPANDER1_PAGEVERSION; + if (ioc->sas_discovery_runtime && + mptsas_is_end_device(&phy_info->attached)) + goto out; + + hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION; hdr.ExtPageLength = 0; hdr.PageNumber = 1; hdr.Reserved1 = 0; @@ -2657,12 +1791,6 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT; error = mpt_config(ioc, &cfg); - - if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) { - error = -ENODEV; - goto out; - } - if (error) goto out_free_consistent; @@ -2882,21 +2010,16 @@ static int mptsas_probe_one_phy(struct device *dev, goto out; } mptsas_set_port(ioc, phy_info, port); - devtprintk(ioc, dev_printk(KERN_DEBUG, &port->dev, - MYIOC_s_FMT "add port %d, sas_addr (0x%llx)\n", - ioc->name, port->port_identifier, - (unsigned long long)phy_info-> - attached.sas_address)); + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "sas_port_alloc: port=%p dev=%p port_id=%d\n", + ioc->name, port, dev, port->port_identifier)); } - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "sas_port_add_phy: phy_id=%d\n", - ioc->name, phy_info->phy_id)); + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_port_add_phy: phy_id=%d\n", + ioc->name, phy_info->phy_id)); sas_port_add_phy(port, phy_info->phy); phy_info->sas_port_add_phy = 0; - devtprintk(ioc, dev_printk(KERN_DEBUG, &phy_info->phy->dev, - MYIOC_s_FMT "add phy %d, phy-obj (0x%p)\n", ioc->name, - phy_info->phy_id, phy_info->phy)); } + if (!mptsas_get_rphy(phy_info) && port && !port->rphy) { struct sas_rphy *rphy; @@ -2909,17 +2032,18 @@ static int mptsas_probe_one_phy(struct device *dev, * the adding/removing of devices that occur * after start of day. 
 		 */
-		if (mptsas_is_end_device(&phy_info->attached) &&
-		    phy_info->attached.handle_parent) {
-			goto out;
-		}
+		if (ioc->sas_discovery_runtime &&
+			mptsas_is_end_device(&phy_info->attached))
+				goto out;
 
 		mptsas_parse_device_info(&identify, &phy_info->attached);
 		if (scsi_is_host_device(parent)) {
 			struct mptsas_portinfo *port_info;
 			int i;
 
-			port_info = ioc->hba_port_info;
+			mutex_lock(&ioc->sas_topology_mutex);
+			port_info = mptsas_get_hba_portinfo(ioc);
+			mutex_unlock(&ioc->sas_topology_mutex);
 
 			for (i = 0; i < port_info->num_phys; i++)
 				if (port_info->phy_info[i].identify.sas_address ==
@@ -2978,7 +2102,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
 	struct mptsas_portinfo *port_info, *hba;
 	int error = -ENOMEM, i;
 
-	hba = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
+	hba = kzalloc(sizeof(*port_info), GFP_KERNEL);
 	if (! hba)
 		goto out;
 
@@ -2988,10 +2112,9 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
 	mptsas_sas_io_unit_pg1(ioc);
 	mutex_lock(&ioc->sas_topology_mutex);
-	port_info = ioc->hba_port_info;
+	port_info = mptsas_get_hba_portinfo(ioc);
 	if (!port_info) {
-		ioc->hba_port_info = port_info = hba;
-		ioc->hba_port_num_phy = port_info->num_phys;
+		port_info = hba;
 		list_add_tail(&port_info->list, &ioc->sas_topology);
 	} else {
 		for (i = 0; i < hba->num_phys; i++) {
@@ -3007,22 +2130,15 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
 		hba = NULL;
 	}
 	mutex_unlock(&ioc->sas_topology_mutex);
-#if defined(CPQ_CIM)
-	ioc->num_ports = port_info->num_phys;
-#endif
 	for (i = 0; i < port_info->num_phys; i++) {
 		mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
 		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
 		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
-		port_info->phy_info[i].identify.handle =
-		    port_info->phy_info[i].handle;
+
 		mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
 		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
 		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
-		    port_info->phy_info[i].identify.handle);
-		if (!ioc->hba_port_sas_addr)
-			ioc->hba_port_sas_addr =
-			    port_info->phy_info[i].identify.sas_address;
+		    port_info->phy_info[i].handle);
 		port_info->phy_info[i].identify.phy_id =
 		    port_info->phy_info[i].phy_id = i;
 		if (port_info->phy_info[i].attached.handle)
@@ -3047,721 +2163,248 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
 	return error;
 }
 
-static void
-mptsas_expander_refresh(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
+static int
+mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle)
 {
-	struct mptsas_portinfo *parent;
-	struct device *parent_dev;
-	struct sas_rphy *rphy;
-	int i;
-	u64 sas_address; /* expander sas address */
-	u32 handle;
-
-	handle = port_info->phy_info[0].handle;
-	sas_address = port_info->phy_info[0].identify.sas_address;
+	struct mptsas_portinfo *port_info, *p, *ex;
+	struct device *parent;
+	struct sas_rphy *rphy;
+	int error = -ENOMEM, i, j;
+
+	ex = kzalloc(sizeof(*port_info), GFP_KERNEL);
+	if (!ex)
+		goto out;
+
+	error = mptsas_sas_expander_pg0(ioc, ex,
+		(MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
+		 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), *handle);
+	if (error)
+		goto out_free_port_info;
+
+	*handle = ex->phy_info[0].handle;
+
+	mutex_lock(&ioc->sas_topology_mutex);
+	port_info = mptsas_find_portinfo_by_handle(ioc, *handle);
+	if (!port_info) {
+		port_info = ex;
+		list_add_tail(&port_info->list, &ioc->sas_topology);
+	} else {
+		for (i = 0; i < ex->num_phys; i++) {
+			port_info->phy_info[i].handle =
+				ex->phy_info[i].handle;
+			port_info->phy_info[i].port_id =
+				ex->phy_info[i].port_id;
+		}
+		kfree(ex->phy_info);
+		kfree(ex);
+		ex = NULL;
+	}
+	mutex_unlock(&ioc->sas_topology_mutex);
+
 	for (i = 0; i < port_info->num_phys; i++) {
 		mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
-		    (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
-		    MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + handle);
-
-		mptsas_sas_device_pg0(ioc,
-		    &port_info->phy_info[i].identify,
-		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
-		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
-		    port_info->phy_info[i].identify.handle);
-		port_info->phy_info[i].identify.phy_id =
-		    port_info->phy_info[i].phy_id;
+			(MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
+			 MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + *handle);
+
+		if (port_info->phy_info[i].identify.handle) {
+			mptsas_sas_device_pg0(ioc,
+				&port_info->phy_info[i].identify,
+				(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+				 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+				port_info->phy_info[i].identify.handle);
+			port_info->phy_info[i].identify.phy_id =
+				port_info->phy_info[i].phy_id;
+		}
 
 		if (port_info->phy_info[i].attached.handle) {
 			mptsas_sas_device_pg0(ioc,
-			    &port_info->phy_info[i].attached,
-			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
-			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
-			    port_info->phy_info[i].attached.handle);
+				&port_info->phy_info[i].attached,
+				(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+				 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+				port_info->phy_info[i].attached.handle);
 			port_info->phy_info[i].attached.phy_id =
 			    port_info->phy_info[i].phy_id;
 		}
 	}
 
-	mutex_lock(&ioc->sas_topology_mutex);
-	parent = mptsas_find_portinfo_by_handle(ioc,
-	    port_info->phy_info[0].identify.handle_parent);
-	if (!parent) {
-		mutex_unlock(&ioc->sas_topology_mutex);
-		return;
-	}
-	for (i = 0, parent_dev = NULL; i < parent->num_phys && !parent_dev;
-	    i++) {
-		if (parent->phy_info[i].attached.sas_address == sas_address) {
-			rphy = mptsas_get_rphy(&parent->phy_info[i]);
-			parent_dev = &rphy->dev;
+	parent = &ioc->sh->shost_gendev;
+	for (i = 0; i < port_info->num_phys; i++) {
+		mutex_lock(&ioc->sas_topology_mutex);
+		list_for_each_entry(p, &ioc->sas_topology, list) {
+			for (j = 0; j < p->num_phys; j++) {
+				if (port_info->phy_info[i].identify.handle !=
+						p->phy_info[j].attached.handle)
+					continue;
+				rphy = mptsas_get_rphy(&p->phy_info[j]);
+				parent = &rphy->dev;
+			}
 		}
+		mutex_unlock(&ioc->sas_topology_mutex);
 	}
-	mutex_unlock(&ioc->sas_topology_mutex);
 
 	mptsas_setup_wide_ports(ioc, port_info);
+
 	for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
-		mptsas_probe_one_phy(parent_dev, &port_info->phy_info[i],
+		mptsas_probe_one_phy(parent, &port_info->phy_info[i],
 		    ioc->sas_index, 0);
+
+	return 0;
+
+ out_free_port_info:
+	if (ex) {
+		kfree(ex->phy_info);
+		kfree(ex);
+	}
+ out:
+	return error;
 }
 
+/*
+ * mptsas_delete_expander_phys
+ *
+ *
+ * This will traverse topology, and remove expanders
+ * that are no longer present
+ */
 static void
-mptsas_expander_event_add(MPT_ADAPTER *ioc,
-    MpiEventDataSasExpanderStatusChange_t *expander_data)
+mptsas_delete_expander_phys(MPT_ADAPTER *ioc)
 {
-	struct mptsas_portinfo *port_info;
+	struct mptsas_portinfo buffer;
+	struct mptsas_portinfo *port_info, *n, *parent;
+	struct mptsas_phyinfo *phy_info;
+	struct sas_port * port;
 	int i;
-	__le64 sas_address;
-
-	port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
-	if (!port_info)
-		BUG();
-	port_info->num_phys = (expander_data->NumPhys) ?
-	    expander_data->NumPhys : 1;
-	port_info->phy_info = kcalloc(port_info->num_phys,
-	    sizeof(struct mptsas_phyinfo), GFP_KERNEL);
-	if (!port_info->phy_info)
-		BUG();
-	memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
-	for (i = 0; i < port_info->num_phys; i++) {
-		port_info->phy_info[i].portinfo = port_info;
-		port_info->phy_info[i].handle =
-		    le16_to_cpu(expander_data->DevHandle);
-		port_info->phy_info[i].identify.sas_address =
-		    le64_to_cpu(sas_address);
-		port_info->phy_info[i].identify.handle_parent =
-		    le16_to_cpu(expander_data->ParentDevHandle);
-	}
+	u64 expander_sas_address;
 
 	mutex_lock(&ioc->sas_topology_mutex);
-	list_add_tail(&port_info->list, &ioc->sas_topology);
-	mutex_unlock(&ioc->sas_topology_mutex);
+	list_for_each_entry_safe(port_info, n, &ioc->sas_topology, list) {
 
-	printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
-	    "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
-	    (unsigned long long)sas_address);
+		if (!(port_info->phy_info[0].identify.device_info &
+		    MPI_SAS_DEVICE_INFO_SMP_TARGET))
+			continue;
 
-	mptsas_expander_refresh(ioc, port_info);
-}
+		if (mptsas_sas_expander_pg0(ioc, &buffer,
+		     (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
+		     MPI_SAS_EXPAND_PGAD_FORM_SHIFT),
+		     port_info->phy_info[0].handle)) {
 
-/**
- * mptsas_delete_expander_siblings - remove siblings attached to expander
- * @ioc: Pointer to MPT_ADAPTER structure
- * @parent: the parent port_info object
- * @expander: the expander port_info object
- **/
-static void
-mptsas_delete_expander_siblings(MPT_ADAPTER *ioc, struct mptsas_portinfo
-    *parent, struct mptsas_portinfo *expander)
-{
-	struct mptsas_phyinfo *phy_info;
-	struct mptsas_portinfo *port_info;
-	struct sas_rphy *rphy;
-	int i;
-
-	phy_info = expander->phy_info;
-	for (i = 0; i < expander->num_phys; i++, phy_info++) {
-		rphy = mptsas_get_rphy(phy_info);
-		if (!rphy)
-			continue;
-		if (rphy->identify.device_type == SAS_END_DEVICE)
-			mptsas_del_end_device(ioc, phy_info);
-	}
-
-	phy_info = expander->phy_info;
-	for (i = 0; i < expander->num_phys; i++, phy_info++) {
-		rphy = mptsas_get_rphy(phy_info);
-		if (!rphy)
-			continue;
-		if (rphy->identify.device_type ==
-		    MPI_SAS_DEVICE_INFO_EDGE_EXPANDER ||
-		    rphy->identify.device_type ==
-		    MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER) {
-			port_info = mptsas_find_portinfo_by_sas_address(ioc,
-			    rphy->identify.sas_address);
-			if (!port_info)
-				continue;
-			if (port_info == parent) /* backlink rphy */
-				continue;
-			/*
-			Delete this expander even if the expdevpage is exists
-			because the parent expander is already deleted
-			*/
-			mptsas_expander_delete(ioc, port_info, 1);
-		}
-	}
-}
-
-
-/**
- *	mptsas_expander_delete - remove this expander
- *	@ioc: Pointer to MPT_ADAPTER structure
- *	@port_info: expander port_info struct
- *	@force: Flag to forcefully delete the expander
- *
- **/
-
-static void mptsas_expander_delete(MPT_ADAPTER *ioc,
-		struct mptsas_portinfo *port_info, u8 force)
-{
-
-	struct mptsas_portinfo *parent;
-	int i;
-	u64 expander_sas_address;
-	struct mptsas_phyinfo *phy_info;
-	struct mptsas_portinfo buffer;
-	struct mptsas_portinfo_details *port_details;
-	struct sas_port *port;
-
-	if (!port_info)
-		return;
-
-	/* see if expander is still there before deleting */
-	mptsas_sas_expander_pg0(ioc, &buffer,
-	    (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
-	    MPI_SAS_EXPAND_PGAD_FORM_SHIFT),
-	    port_info->phy_info[0].identify.handle);
-
-	if (buffer.num_phys) {
-		kfree(buffer.phy_info);
-		if (!force)
-			return;
-	}
-
-
-	/*
-	 * Obtain the port_info instance to the parent port
-	 */
-	port_details = NULL;
-	expander_sas_address =
-	    port_info->phy_info[0].identify.sas_address;
-	parent = mptsas_find_portinfo_by_handle(ioc,
-	    port_info->phy_info[0].identify.handle_parent);
-	mptsas_delete_expander_siblings(ioc, parent, port_info);
-	if (!parent)
-		goto out;
-
-	/*
-	 * Delete rphys in the parent that point
-	 * to this expander.
-	 */
-	phy_info = parent->phy_info;
-	port = NULL;
-	for (i = 0; i < parent->num_phys; i++, phy_info++) {
-		if (!phy_info->phy)
-			continue;
-		if (phy_info->attached.sas_address !=
-		    expander_sas_address)
-			continue;
-		if (!port) {
-			port = mptsas_get_port(phy_info);
-			port_details = phy_info->port_details;
-		}
-		dev_printk(KERN_DEBUG, &phy_info->phy->dev,
-		    MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n", ioc->name,
-		    phy_info->phy_id, phy_info->phy);
-		sas_port_delete_phy(port, phy_info->phy);
-	}
-	if (port) {
-		dev_printk(KERN_DEBUG, &port->dev,
-		    MYIOC_s_FMT "delete port %d, sas_addr (0x%llx)\n",
-		    ioc->name, port->port_identifier,
-		    (unsigned long long)expander_sas_address);
-		sas_port_delete(port);
-		mptsas_port_delete(ioc, port_details);
-	}
- out:
-
-	printk(MYIOC_s_INFO_FMT "delete expander: num_phys %d, "
-	    "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
-	    (unsigned long long)expander_sas_address);
-
-	/*
-	 * free link
-	 */
-	list_del(&port_info->list);
-	kfree(port_info->phy_info);
-	kfree(port_info);
-}
-
-
-/**
- * mptsas_send_expander_event - expanders events
- * @ioc: Pointer to MPT_ADAPTER structure
- * @expander_data: event data
- *
- *
- * This function handles adding, removing, and refreshing
- * device handles within the expander objects.
- */
-static void
-mptsas_send_expander_event(struct fw_event_work *fw_event)
-{
-	MPT_ADAPTER *ioc;
-	MpiEventDataSasExpanderStatusChange_t *expander_data;
-	struct mptsas_portinfo *port_info;
-	__le64 sas_address;
-	int i;
-
-	ioc = fw_event->ioc;
-	expander_data = (MpiEventDataSasExpanderStatusChange_t *)
-	    fw_event->event_data;
-	memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
-	port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
-
-	if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) {
-		if (port_info) {
-			for (i = 0; i < port_info->num_phys; i++) {
-				port_info->phy_info[i].portinfo = port_info;
-				port_info->phy_info[i].handle =
-				    le16_to_cpu(expander_data->DevHandle);
-				port_info->phy_info[i].identify.sas_address =
-				    le64_to_cpu(sas_address);
-				port_info->phy_info[i].identify.handle_parent =
-				    le16_to_cpu(expander_data->ParentDevHandle);
-			}
-			mptsas_expander_refresh(ioc, port_info);
-		} else if (!port_info && expander_data->NumPhys)
-			mptsas_expander_event_add(ioc, expander_data);
-	} else if (expander_data->ReasonCode ==
-	    MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING)
-		mptsas_expander_delete(ioc, port_info, 0);
-
-	mptsas_free_fw_event(ioc, fw_event);
-}
-
-
-/**
- * mptsas_expander_add -
- * @ioc: Pointer to MPT_ADAPTER structure
- * @handle:
- *
- */
-struct mptsas_portinfo *
-mptsas_expander_add(MPT_ADAPTER *ioc, u16 handle)
-{
-	struct mptsas_portinfo buffer, *port_info;
-	int i;
-
-	if ((mptsas_sas_expander_pg0(ioc, &buffer,
-	    (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
-	    MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)))
-		return NULL;
-
-	port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_ATOMIC);
-	if (!port_info) {
-		dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-		    "%s: exit at line=%d\n", ioc->name,
-		    __func__, __LINE__));
-		return NULL;
-	}
-	port_info->num_phys = buffer.num_phys;
-	port_info->phy_info = buffer.phy_info;
-	for (i = 0; i < port_info->num_phys; i++)
-		port_info->phy_info[i].portinfo = port_info;
-	mutex_lock(&ioc->sas_topology_mutex);
-	list_add_tail(&port_info->list, &ioc->sas_topology);
-	mutex_unlock(&ioc->sas_topology_mutex);
-	printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
-	    "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
-	    (unsigned long long)buffer.phy_info[0].identify.sas_address);
-	mptsas_expander_refresh(ioc, port_info);
-	return port_info;
-}
-
-static void
-mptsas_send_link_status_event(struct fw_event_work *fw_event)
-{
-	MPT_ADAPTER *ioc;
-	MpiEventDataSasPhyLinkStatus_t *link_data;
-	struct mptsas_portinfo *port_info;
-	struct mptsas_phyinfo *phy_info = NULL;
-	__le64 sas_address;
-	u8 phy_num;
-	u8 link_rate;
-
-	ioc = fw_event->ioc;
-	link_data = (MpiEventDataSasPhyLinkStatus_t *)fw_event->event_data;
-
-	memcpy(&sas_address, &link_data->SASAddress, sizeof(__le64));
-	sas_address = le64_to_cpu(sas_address);
-	link_rate = link_data->LinkRates >> 4;
-	phy_num = link_data->PhyNum;
-
-	port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
-	if (port_info) {
-		phy_info = &port_info->phy_info[phy_num];
-		if (phy_info)
-			phy_info->negotiated_link_rate = link_rate;
-	}
-
-	if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
-	    link_rate == MPI_SAS_IOUNIT0_RATE_3_0) {
-
-		if (!port_info) {
-			if (ioc->old_sas_discovery_protocal) {
-				port_info = mptsas_expander_add(ioc,
-				    le16_to_cpu(link_data->DevHandle));
-				if (port_info)
-					goto out;
-			}
-			goto out;
-		}
-
-		if (port_info == ioc->hba_port_info)
-			mptsas_probe_hba_phys(ioc);
-		else
-			mptsas_expander_refresh(ioc, port_info);
-	} else if (phy_info && phy_info->phy) {
-		if (link_rate == MPI_SAS_IOUNIT0_RATE_PHY_DISABLED)
-			phy_info->phy->negotiated_linkrate =
-			    SAS_PHY_DISABLED;
-		else if (link_rate ==
-		    MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION)
-			phy_info->phy->negotiated_linkrate =
-			    SAS_LINK_RATE_FAILED;
-		else
-			phy_info->phy->negotiated_linkrate =
-			    SAS_LINK_RATE_UNKNOWN;
-	}
- out:
-	mptsas_free_fw_event(ioc, fw_event);
-}
-
-static void
-mptsas_not_responding_devices(MPT_ADAPTER *ioc)
-{
-	struct mptsas_portinfo buffer, *port_info;
-	struct mptsas_device_info *sas_info;
-	struct mptsas_devinfo sas_device;
-	u32 handle;
-	VirtTarget *vtarget = NULL;
-	struct mptsas_phyinfo *phy_info;
-	u8 found_expander;
-	int retval, retry_count;
-	unsigned long flags;
-
-	mpt_findImVolumes(ioc);
-
-	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
-	if (ioc->ioc_reset_in_progress) {
-		dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-		    "%s: exiting due to a parallel reset \n", ioc->name,
-		    __func__));
-		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
-		return;
-	}
-	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
-
-	/* devices, logical volumes */
-	mutex_lock(&ioc->sas_device_info_mutex);
- redo_device_scan:
-	list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) {
-		if (sas_info->is_cached)
-			continue;
-		if (!sas_info->is_logical_volume) {
-			sas_device.handle = 0;
-			retry_count = 0;
-retry_page:
-			retval = mptsas_sas_device_pg0(ioc, &sas_device,
-				(MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID
-				<< MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
-				(sas_info->fw.channel << 8) +
-				sas_info->fw.id);
-
-			if (sas_device.handle)
-				continue;
-			if (retval == -EBUSY) {
-				spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
-				if (ioc->ioc_reset_in_progress) {
-					dfailprintk(ioc,
-					    printk(MYIOC_s_DEBUG_FMT
-					    "%s: exiting due to reset\n",
-					    ioc->name, __func__));
-					spin_unlock_irqrestore
-					    (&ioc->taskmgmt_lock, flags);
-					mutex_unlock(&ioc->
-					    sas_device_info_mutex);
-					return;
-				}
-				spin_unlock_irqrestore(&ioc->taskmgmt_lock,
-				    flags);
-			}
-
-			if (retval && (retval != -ENODEV)) {
-				if (retry_count < 10) {
-					retry_count++;
-					goto retry_page;
-				} else {
-					devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-					"%s: Config page retry exceeded retry "
-					"count deleting device 0x%llx\n",
-					ioc->name, __func__,
-					sas_info->sas_address));
-				}
-			}
-
-			/* delete device */
-			vtarget = mptsas_find_vtarget(ioc,
-				sas_info->fw.channel, sas_info->fw.id);
+			/*
+			 * Obtain the port_info instance to the parent port
+			 */
+			parent = mptsas_find_portinfo_by_handle(ioc,
+			    port_info->phy_info[0].identify.handle_parent);
 
-			if (vtarget)
-				vtarget->deleted = 1;
+			if (!parent)
+				goto next_port;
 
-			phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
-					sas_info->sas_address);
+			expander_sas_address =
+				port_info->phy_info[0].identify.sas_address;
 
-			if (phy_info) {
-				mptsas_del_end_device(ioc, phy_info);
-				goto redo_device_scan;
+			/*
+			 * Delete rphys in the parent that point
+			 * to this expander.  The transport layer will
+			 * cleanup all the children.
+			 */
+			phy_info = parent->phy_info;
+			for (i = 0; i < parent->num_phys; i++, phy_info++) {
+				port = mptsas_get_port(phy_info);
+				if (!port)
+					continue;
+				if (phy_info->attached.sas_address !=
+					expander_sas_address)
+					continue;
+				dsaswideprintk(ioc,
+					dev_printk(KERN_DEBUG, &port->dev,
+					MYIOC_s_FMT "delete port (%d)\n", ioc->name,
+					port->port_identifier));
+				sas_port_delete(port);
+				mptsas_port_delete(ioc, phy_info->port_details);
 			}
-		} else
-			mptsas_volume_delete(ioc, sas_info->fw.id);
-	}
-	mutex_lock(&ioc->sas_device_info_mutex);
+ next_port:
 
-	/* expanders */
-	mutex_lock(&ioc->sas_topology_mutex);
- redo_expander_scan:
-	list_for_each_entry(port_info, &ioc->sas_topology, list) {
-
-		if (port_info->phy_info &&
-		    (!(port_info->phy_info[0].identify.device_info &
-		    MPI_SAS_DEVICE_INFO_SMP_TARGET)))
-			continue;
-		found_expander = 0;
-		handle = 0xFFFF;
-		while (!mptsas_sas_expander_pg0(ioc, &buffer,
-		    (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
-		     MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle) &&
-		    !found_expander) {
-
-			handle = buffer.phy_info[0].handle;
-			if (buffer.phy_info[0].identify.sas_address ==
-			    port_info->phy_info[0].identify.sas_address) {
-				found_expander = 1;
-			}
-			kfree(buffer.phy_info);
-		}
+			phy_info = port_info->phy_info;
+			for (i = 0; i < port_info->num_phys; i++, phy_info++)
+				mptsas_port_delete(ioc, phy_info->port_details);
 
-		if (!found_expander) {
-			mptsas_expander_delete(ioc, port_info, 0);
-			goto redo_expander_scan;
+			list_del(&port_info->list);
+			kfree(port_info->phy_info);
+			kfree(port_info);
 		}
+		/*
+		 * Free this memory allocated from inside
+		 * mptsas_sas_expander_pg0
+		 */
+		kfree(buffer.phy_info);
 	}
-	mutex_lock(&ioc->sas_topology_mutex);
-}
-
-/**
- * mptsas_probe_expanders - adding expanders
- * @ioc: Pointer to MPT_ADAPTER structure
- *
- **/
-static void
-mptsas_probe_expanders(MPT_ADAPTER *ioc)
-{
-	struct mptsas_portinfo buffer, *port_info;
-	u32 handle;
-	int i;
-
-	handle = 0xFFFF;
-	while (!mptsas_sas_expander_pg0(ioc, &buffer,
-	    (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
-	     MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)) {
-
-		handle = buffer.phy_info[0].handle;
-		port_info = mptsas_find_portinfo_by_sas_address(ioc,
-		    buffer.phy_info[0].identify.sas_address);
-
-		if (port_info) {
-			/* refreshing handles */
-			for (i = 0; i < buffer.num_phys; i++) {
-				port_info->phy_info[i].handle = handle;
-				port_info->phy_info[i].identify.handle_parent =
-				    buffer.phy_info[0].identify.handle_parent;
-			}
-			mptsas_expander_refresh(ioc, port_info);
-			kfree(buffer.phy_info);
-			continue;
-		}
-
-		port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
-		if (!port_info) {
-			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: exit at line=%d\n", ioc->name,
-			    __func__, __LINE__));
-			return;
-		}
-		port_info->num_phys = buffer.num_phys;
-		port_info->phy_info = buffer.phy_info;
-		for (i = 0; i < port_info->num_phys; i++)
-			port_info->phy_info[i].portinfo = port_info;
-		mutex_lock(&ioc->sas_topology_mutex);
-		list_add_tail(&port_info->list, &ioc->sas_topology);
-		mutex_unlock(&ioc->sas_topology_mutex);
-		printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
-		    "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
-		    (unsigned long long)buffer.phy_info[0].identify.sas_address);
-		mptsas_expander_refresh(ioc, port_info);
-	}
-}
-
-static void
-mptsas_probe_devices(MPT_ADAPTER *ioc)
-{
-	u16 handle;
-	struct mptsas_devinfo sas_device;
-	struct mptsas_phyinfo *phy_info;
-
-	handle = 0xFFFF;
-	while (!(mptsas_sas_device_pg0(ioc, &sas_device,
-	    MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
-
-		handle = sas_device.handle;
-
-		if ((sas_device.device_info &
-		     (MPI_SAS_DEVICE_INFO_SSP_TARGET |
-		      MPI_SAS_DEVICE_INFO_STP_TARGET |
-		      MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0)
-			continue;
-
-		phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
-		if (!phy_info)
-			continue;
-
-		if (mptsas_get_rphy(phy_info))
-			continue;
-
-		mptsas_add_end_device(ioc, phy_info);
-	}
+	mutex_unlock(&ioc->sas_topology_mutex);
 }
 
-/**
- * mptsas_scan_sas_topology -
- * @ioc: Pointer to MPT_ADAPTER structure
- * @sas_address:
- *
- **/
+/*
+ * Start of day discovery
+ */
 static void
 mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
 {
-	struct scsi_device *sdev;
+	u32 handle = 0xFFFF;
 	int i;
 
+	mutex_lock(&ioc->sas_discovery_mutex);
 	mptsas_probe_hba_phys(ioc);
-	mptsas_probe_expanders(ioc);
-	mptsas_probe_devices(ioc);
-
+	while (!mptsas_probe_expander_phys(ioc, &handle))
+		;
 	/* Reporting RAID volumes.
 	 */
-	if (!ioc->ir_firmware || !ioc->raid_data.pIocPg2 ||
-	    !ioc->raid_data.pIocPg2->NumActiveVolumes)
-		return;
+	if (!ioc->ir_firmware)
+		goto out;
+	if (!ioc->raid_data.pIocPg2)
+		goto out;
+	if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
+		goto out;
 	for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
-		sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
-		    ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
-		if (sdev) {
-			scsi_device_put(sdev);
-			continue;
-		}
-		printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
-		    "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
-		    ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID);
 		scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
 		    ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
 	}
+ out:
+	mutex_unlock(&ioc->sas_discovery_mutex);
 }
 
-
+/*
+ * Work queue thread to handle Runtime discovery
+ * Mere purpose is the hot add/delete of expanders
+ *(Mutex UNLOCKED)
+ */
 static void
-mptsas_handle_queue_full_event(struct fw_event_work *fw_event)
+__mptsas_discovery_work(MPT_ADAPTER *ioc)
 {
-	MPT_ADAPTER *ioc;
-	EventDataQueueFull_t *qfull_data;
-	struct mptsas_device_info *sas_info;
-	struct scsi_device *sdev;
-	int depth;
-	int id = -1;
-	int channel = -1;
-	int fw_id, fw_channel;
-	u16 current_depth;
-
-
-	ioc = fw_event->ioc;
-	qfull_data = (EventDataQueueFull_t *)fw_event->event_data;
-	fw_id = qfull_data->TargetID;
-	fw_channel = qfull_data->Bus;
-	current_depth = le16_to_cpu(qfull_data->CurrentDepth);
-
-	/* if hidden raid component, look for the volume id */
-	mutex_lock(&ioc->sas_device_info_mutex);
-	if (mptscsih_is_phys_disk(ioc, fw_channel, fw_id)) {
-		list_for_each_entry(sas_info, &ioc->sas_device_info_list,
-		    list) {
-			if (sas_info->is_cached ||
-			    sas_info->is_logical_volume)
-				continue;
-			if (sas_info->is_hidden_raid_component &&
-			    (sas_info->fw.channel == fw_channel &&
-			    sas_info->fw.id == fw_id)) {
-				id = sas_info->volume_id;
-				channel = MPTSAS_RAID_CHANNEL;
-				goto out;
-			}
-		}
-	} else {
-		list_for_each_entry(sas_info, &ioc->sas_device_info_list,
-		    list) {
-			if (sas_info->is_cached ||
-			    sas_info->is_hidden_raid_component ||
-			    sas_info->is_logical_volume)
-				continue;
-			if (sas_info->fw.channel == fw_channel &&
-			    sas_info->fw.id == fw_id) {
-				id = sas_info->os.id;
-				channel = sas_info->os.channel;
-				goto out;
-			}
-		}
-	}
+	u32 handle = 0xFFFF;
 
- out:
-	mutex_unlock(&ioc->sas_device_info_mutex);
-
-	if (id != -1) {
-		shost_for_each_device(sdev, ioc->sh) {
-			if (sdev->id == id && sdev->channel == channel) {
-				if (current_depth > sdev->queue_depth) {
-					sdev_printk(KERN_INFO, sdev,
-					    "strange observation, the queue "
-					    "depth is (%d) meanwhile fw queue "
-					    "depth (%d)\n", sdev->queue_depth,
-					    current_depth);
-					continue;
-				}
-				depth = scsi_track_queue_full(sdev,
-				    current_depth - 1);
-				if (depth > 0)
-					sdev_printk(KERN_INFO, sdev,
-					"Queue depth reduced to (%d)\n",
-					    depth);
-				else if (depth < 0)
-					sdev_printk(KERN_INFO, sdev,
-					"Tagged Command Queueing is being "
-					"disabled\n");
-				else if (depth == 0)
-					sdev_printk(KERN_INFO, sdev,
-					"Queue depth not changed yet\n");
-			}
-		}
-	}
+	ioc->sas_discovery_runtime=1;
+	mptsas_delete_expander_phys(ioc);
+	mptsas_probe_hba_phys(ioc);
+	while (!mptsas_probe_expander_phys(ioc, &handle))
+		;
+	ioc->sas_discovery_runtime=0;
+}
 
-	mptsas_free_fw_event(ioc, fw_event);
+/*
+ * Work queue thread to handle Runtime discovery
+ * Mere purpose is the hot add/delete of expanders
+ *(Mutex LOCKED)
+ */
+static void
+mptsas_discovery_work(struct work_struct *work)
+{
+	struct mptsas_discovery_event *ev =
+		container_of(work, struct mptsas_discovery_event, work);
+	MPT_ADAPTER *ioc = ev->ioc;
+
+	mutex_lock(&ioc->sas_discovery_mutex);
+	__mptsas_discovery_work(ioc);
+	mutex_unlock(&ioc->sas_discovery_mutex);
+	kfree(ev);
 }
 
-
 static struct mptsas_phyinfo *
 mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
 {
@@ -3786,80 +2429,69 @@ mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
 	return phy_info;
 }
 
-/**
- * mptsas_find_phyinfo_by_phys_disk_num -
- * @ioc: Pointer to MPT_ADAPTER structure
- * @phys_disk_num:
- * @channel:
- * @id:
- *
- **/
 static struct mptsas_phyinfo *
-mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num,
-	u8 channel, u8 id)
+mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u8 channel, u8 id)
 {
-	struct mptsas_phyinfo *phy_info = NULL;
 	struct mptsas_portinfo *port_info;
-	RaidPhysDiskPage1_t *phys_disk = NULL;
-	int num_paths;
-	u64 sas_address = 0;
+	struct mptsas_phyinfo *phy_info = NULL;
 	int i;
 
-	phy_info = NULL;
-	if (!ioc->raid_data.pIocPg3)
-		return NULL;
-	/* dual port support */
-	num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
-	if (!num_paths)
-		goto out;
-	phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
-	    (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
-	if (!phys_disk)
-		goto out;
-	mpt_raid_phys_disk_pg1(ioc, phys_disk_num, phys_disk);
-	for (i = 0; i < num_paths; i++) {
-		if ((phys_disk->Path[i].Flags & 1) != 0)
-			/* entry no longer valid */
-			continue;
-		if ((id == phys_disk->Path[i].PhysDiskID) &&
-		    (channel == phys_disk->Path[i].PhysDiskBus)) {
-			memcpy(&sas_address, &phys_disk->Path[i].WWID,
-			    sizeof(u64));
-			phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
-			    sas_address);
-			goto out;
+	mutex_lock(&ioc->sas_topology_mutex);
+	list_for_each_entry(port_info, &ioc->sas_topology, list) {
+		for (i = 0; i < port_info->num_phys; i++) {
+			if (!mptsas_is_end_device(
+				&port_info->phy_info[i].attached))
+				continue;
+			if (port_info->phy_info[i].attached.id != id)
+				continue;
+			if (port_info->phy_info[i].attached.channel != channel)
+				continue;
+			phy_info = &port_info->phy_info[i];
+			break;
 		}
 	}
+	mutex_unlock(&ioc->sas_topology_mutex);
+	return phy_info;
+}
 
- out:
-	kfree(phys_disk);
-	if (phy_info)
-		return phy_info;
+static struct mptsas_phyinfo *
+mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
+{
+	struct mptsas_portinfo *port_info;
+	struct mptsas_phyinfo *phy_info = NULL;
+	int i;
 
-	/*
-	 * Extra code to handle RAID0 case, where the sas_address is not updated
-	 * in phys_disk_page_1 when hotswapped
-	 */
 	mutex_lock(&ioc->sas_topology_mutex);
 	list_for_each_entry(port_info, &ioc->sas_topology, list) {
-		for (i = 0; i < port_info->num_phys && !phy_info; i++) {
+		for (i = 0; i < port_info->num_phys; i++) {
 			if (!mptsas_is_end_device(
 				&port_info->phy_info[i].attached))
 				continue;
 			if (port_info->phy_info[i].attached.phys_disk_num == ~0)
 				continue;
-			if ((port_info->phy_info[i].attached.phys_disk_num ==
-			    phys_disk_num) &&
-			    (port_info->phy_info[i].attached.id == id) &&
-			    (port_info->phy_info[i].attached.channel ==
-			     channel))
-				phy_info = &port_info->phy_info[i];
+			if (port_info->phy_info[i].attached.phys_disk_num != id)
+				continue;
+			if (port_info->phy_info[i].attached.channel != channel)
+				continue;
+			phy_info = &port_info->phy_info[i];
+			break;
 		}
 	}
 	mutex_unlock(&ioc->sas_topology_mutex);
 	return phy_info;
 }
 
+/*
+ * Work queue thread to clear the persitency table
+ */
+static void
+mptsas_persist_clear_table(struct work_struct *work)
+{
+	MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
+
+	mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
+}
+
 static void
 mptsas_reprobe_lun(struct scsi_device *sdev, void *data)
 {
@@ -3885,8 +2517,7 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
 	pRaidVolumePage0_t		buffer = NULL;
 	RaidPhysDiskPage0_t		phys_disk;
 	int				i;
-	struct mptsas_phyinfo	*phy_info;
-	struct mptsas_devinfo		sas_device;
+	struct mptsas_hotplug_event	*ev;
 
 	memset(&cfg, 0 , sizeof(CONFIGPARMS));
 	memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
@@ -3926,16 +2557,20 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
 		    buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
 			continue;
 
-		if (mptsas_sas_device_pg0(ioc, &sas_device,
-		    (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
-		     MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
-		    (phys_disk.PhysDiskBus << 8) +
-			phys_disk.PhysDiskID))
-			continue;
+		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
+		if (!ev) {
+			printk(MYIOC_s_WARN_FMT "mptsas: lost hotplug event\n", ioc->name);
+			goto out;
+		}
 
-		phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
-		    sas_device.sas_address);
-		mptsas_add_end_device(ioc, phy_info);
+		INIT_WORK(&ev->work, mptsas_hotplug_work);
+		ev->ioc = ioc;
+		ev->id = phys_disk.PhysDiskID;
+		ev->channel = phys_disk.PhysDiskBus;
+		ev->phys_disk_num_valid = 1;
+		ev->phys_disk_num = phys_disk.PhysDiskNum;
+		ev->event_type = MPTSAS_ADD_DEVICE;
+		schedule_work(&ev->work);
 	}
 
  out:
@@ -3947,386 +2582,417 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
  * Work queue thread to handle SAS hotplug events
  */
 static void
-mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
-    struct mptsas_hotplug_event *hot_plug_info)
+mptsas_hotplug_work(struct work_struct *work)
 {
-	struct mptsas_phyinfo *phy_info;
-	struct scsi_target * starget;
-	struct mptsas_devinfo sas_device;
-	VirtTarget *vtarget;
-	int i;
-
-	switch (hot_plug_info->event_type) {
-
-	case MPTSAS_ADD_PHYSDISK:
-
-		if (!ioc->raid_data.pIocPg2)
-			break;
-
-		for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
-			if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID ==
-			    hot_plug_info->id) {
-				printk(MYIOC_s_WARN_FMT "firmware bug: unable "
-				    "to add hidden disk - target_id matchs "
-				    "volume_id\n", ioc->name);
-				mptsas_free_fw_event(ioc, fw_event);
-				return;
-			}
-		}
-		mpt_findImVolumes(ioc);
+	struct mptsas_hotplug_event *ev =
+		container_of(work, struct mptsas_hotplug_event, work);
 
-	case MPTSAS_ADD_DEVICE:
-		memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
-		mptsas_sas_device_pg0(ioc, &sas_device,
-		    (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
-		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
-		    (hot_plug_info->channel << 8) +
-		    hot_plug_info->id);
-
-		if (!sas_device.handle)
-			return;
-
-		phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
-		if (!phy_info)
-			break;
-
-		if (mptsas_get_rphy(phy_info))
-			break;
-
-		mptsas_add_end_device(ioc, phy_info);
-		break;
+	MPT_ADAPTER *ioc = ev->ioc;
+	struct mptsas_phyinfo *phy_info;
+	struct sas_rphy *rphy;
+	struct sas_port *port;
+	struct scsi_device *sdev;
+	struct scsi_target * starget;
+	struct sas_identify identify;
+	char *ds = NULL;
+	struct mptsas_devinfo sas_device;
+	VirtTarget *vtarget;
+	VirtDevice *vdevice;
 
+	mutex_lock(&ioc->sas_discovery_mutex);
+	switch (ev->event_type) {
 	case MPTSAS_DEL_DEVICE:
-		phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
-		    hot_plug_info->sas_address);
-		mptsas_del_end_device(ioc, phy_info);
-		break;
-
-	case MPTSAS_DEL_PHYSDISK:
-		mpt_findImVolumes(ioc);
-
-		phy_info = mptsas_find_phyinfo_by_phys_disk_num(
-		    ioc, hot_plug_info->phys_disk_num,
-		    hot_plug_info->channel,
-		    hot_plug_info->id);
-		mptsas_del_end_device(ioc, phy_info);
-		break;
+		phy_info = NULL;
+		if (ev->phys_disk_num_valid) {
+			if (ev->hidden_raid_component){
+				if (mptsas_sas_device_pg0(ioc, &sas_device,
+				    (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+				     MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+				    (ev->channel << 8) + ev->id)) {
+					dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+					    "%s: exit at line=%d\n", ioc->name,
+					    __func__, __LINE__));
+					break;
+				}
+				phy_info = mptsas_find_phyinfo_by_sas_address(
+				    ioc, sas_device.sas_address);
+			}else
+				phy_info = mptsas_find_phyinfo_by_phys_disk_num(
+				    ioc, ev->channel, ev->phys_disk_num);
+		}
 
-	case MPTSAS_ADD_PHYSDISK_REPROBE:
+		if (!phy_info)
+			phy_info = mptsas_find_phyinfo_by_target(ioc,
+			    ev->channel, ev->id);
 
-		if (mptsas_sas_device_pg0(ioc, &sas_device,
-		    (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
-		     MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
-		    (hot_plug_info->channel << 8) + hot_plug_info->id)) {
+		/*
+		 * Sanity checks, for non-existing phys and remote rphys.
+		 */
+		if (!phy_info){
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: fw_id=%d exit at line=%d\n", ioc->name,
-			    __func__, hot_plug_info->id, __LINE__));
+			    "%s: exit at line=%d\n", ioc->name,
+			    __func__, __LINE__));
 			break;
 		}
-
-		phy_info = mptsas_find_phyinfo_by_sas_address(
-		    ioc, sas_device.sas_address);
-
-		if (!phy_info) {
+		if (!phy_info->port_details) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: fw_id=%d exit at line=%d\n", ioc->name,
-			    __func__, hot_plug_info->id, __LINE__));
+			    "%s: exit at line=%d\n", ioc->name,
+			    __func__, __LINE__));
 			break;
 		}
-
-		starget = mptsas_get_starget(phy_info);
-		if (!starget) {
+		rphy = mptsas_get_rphy(phy_info);
+		if (!rphy) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: fw_id=%d exit at line=%d\n", ioc->name,
-			    __func__, hot_plug_info->id, __LINE__));
+			    "%s: exit at line=%d\n", ioc->name,
+			    __func__, __LINE__));
 			break;
 		}
 
-		vtarget = starget->hostdata;
-		if (!vtarget) {
+		port = mptsas_get_port(phy_info);
+		if (!port) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: fw_id=%d exit at line=%d\n", ioc->name,
-			    __func__, hot_plug_info->id, __LINE__));
+			    "%s: exit at line=%d\n", ioc->name,
+			    __func__, __LINE__));
 			break;
 		}
-		mpt_findImVolumes(ioc);
 
-		starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Hidding: "
-		    "fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
-		    ioc->name, hot_plug_info->channel, hot_plug_info->id,
-		    hot_plug_info->phys_disk_num, (unsigned long long)
-		    sas_device.sas_address);
+		starget = mptsas_get_starget(phy_info);
+		if (starget) {
+			vtarget = starget->hostdata;
+
+			if (!vtarget) {
+				dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+				    "%s: exit at line=%d\n", ioc->name,
+				    __func__, __LINE__));
+				break;
+			}
+
+			/*
+			 * Handling RAID components
+			 */
+			if (ev->phys_disk_num_valid &&
+			    ev->hidden_raid_component) {
+				printk(MYIOC_s_INFO_FMT
+				    "RAID Hidding: channel=%d, id=%d, "
+				    "physdsk %d \n", ioc->name, ev->channel,
+				    ev->id, ev->phys_disk_num);
+				vtarget->id = ev->phys_disk_num;
+				vtarget->tflags |=
+				    MPT_TARGET_FLAGS_RAID_COMPONENT;
+				mptsas_reprobe_target(starget, 1);
+				phy_info->attached.phys_disk_num =
+				    ev->phys_disk_num;
+				break;
+			}
+		}
 
-		vtarget->id = hot_plug_info->phys_disk_num;
-		vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
-		phy_info->attached.phys_disk_num = hot_plug_info->phys_disk_num;
-		mptsas_reprobe_target(starget, 1);
+		if (phy_info->attached.device_info &
+		    MPI_SAS_DEVICE_INFO_SSP_TARGET)
+			ds = "ssp";
+		if (phy_info->attached.device_info &
+		    MPI_SAS_DEVICE_INFO_STP_TARGET)
+			ds = "stp";
+		if (phy_info->attached.device_info &
+		    MPI_SAS_DEVICE_INFO_SATA_DEVICE)
+			ds = "sata";
+
+		printk(MYIOC_s_INFO_FMT
+		       "removing %s device, channel %d, id %d, phy %d\n",
+		       ioc->name, ds, ev->channel, ev->id, phy_info->phy_id);
+		dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
+		    "delete port (%d)\n", ioc->name, port->port_identifier);
+		sas_port_delete(port);
+		mptsas_port_delete(ioc, phy_info->port_details);
 		break;
+	case MPTSAS_ADD_DEVICE:
 
-	case MPTSAS_DEL_PHYSDISK_REPROBE:
+		if (ev->phys_disk_num_valid)
+			mpt_findImVolumes(ioc);
 
+		/*
+		 * Refresh sas device pg0 data
+		 */
 		if (mptsas_sas_device_pg0(ioc, &sas_device,
 		    (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
 		     MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
-		    (hot_plug_info->channel << 8) + hot_plug_info->id)) {
-				dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-				    "%s: fw_id=%d exit at line=%d\n",
-				    ioc->name, __func__,
-				    hot_plug_info->id, __LINE__));
+		    (ev->channel << 8) + ev->id)) {
+			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+			    "%s: exit at line=%d\n", ioc->name,
+			    __func__, __LINE__));
 			break;
 		}
 
+		__mptsas_discovery_work(ioc);
+
 		phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
 		    sas_device.sas_address);
-		if (!phy_info) {
+
+		if (!phy_info || !phy_info->port_details) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: fw_id=%d exit at line=%d\n", ioc->name,
-			    __func__, hot_plug_info->id, __LINE__));
+			    "%s: exit at line=%d\n", ioc->name,
+			    __func__, __LINE__));
 			break;
 		}
 
 		starget = mptsas_get_starget(phy_info);
-		if (!starget) {
-			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: fw_id=%d exit at line=%d\n", ioc->name,
-			    __func__, hot_plug_info->id, __LINE__));
+		if (starget && (!ev->hidden_raid_component)){
+
+			vtarget = starget->hostdata;
+
+			if (!vtarget) {
+				dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+				    "%s: exit at line=%d\n", ioc->name,
+				    __func__, __LINE__));
+				break;
+			}
+			/*
+			 * Handling RAID components
+			 */
+			if (vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
+				printk(MYIOC_s_INFO_FMT
+				    "RAID Exposing: channel=%d, id=%d, "
+				    "physdsk %d \n", ioc->name, ev->channel,
+				    ev->id, ev->phys_disk_num);
+				vtarget->tflags &=
+				    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
+				vtarget->id = ev->id;
+				mptsas_reprobe_target(starget, 0);
+				phy_info->attached.phys_disk_num = ~0;
+			}
 			break;
 		}
 
-		vtarget = starget->hostdata;
-		if (!vtarget) {
+		if (mptsas_get_rphy(phy_info)) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: fw_id=%d exit at line=%d\n", ioc->name,
-			    __func__, hot_plug_info->id, __LINE__));
+			    "%s: exit at line=%d\n", ioc->name,
+			    __func__, __LINE__));
+			if (ev->channel) printk("%d\n", __LINE__);
 			break;
 		}
 
-		if (!(vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)) {
+		port = mptsas_get_port(phy_info);
+		if (!port) {
 			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-			    "%s: fw_id=%d exit at line=%d\n", ioc->name,
-			    __func__, hot_plug_info->id, __LINE__));
+			    "%s: exit at line=%d\n", ioc->name,
+			    __func__, __LINE__));
 			break;
 		}
+		memcpy(&phy_info->attached, &sas_device,
+		    sizeof(struct mptsas_devinfo));
+
+		if (phy_info->attached.device_info &
+		    MPI_SAS_DEVICE_INFO_SSP_TARGET)
+			ds = "ssp";
+		if (phy_info->attached.device_info &
+		    MPI_SAS_DEVICE_INFO_STP_TARGET)
+			ds = "stp";
+		if (phy_info->attached.device_info &
+		    MPI_SAS_DEVICE_INFO_SATA_DEVICE)
+			ds = "sata";
+
+		printk(MYIOC_s_INFO_FMT
+		       "attaching %s device, channel %d, id %d, phy %d\n",
+		       ioc->name, ds, ev->channel, ev->id, ev->phy_id);
 
-		mpt_findImVolumes(ioc);
+		mptsas_parse_device_info(&identify, &phy_info->attached);
+		rphy = sas_end_device_alloc(port);
+		if (!rphy) {
+			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+			    "%s: exit at line=%d\n", ioc->name,
+			    __func__, __LINE__));
+			break; /* non-fatal: an rphy can be added later */
+		}
 
-		starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Exposing:"
-		    " fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
-		    ioc->name, hot_plug_info->channel, hot_plug_info->id,
-		    hot_plug_info->phys_disk_num, (unsigned long long)
-		    sas_device.sas_address);
-
-		vtarget->tflags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
-		vtarget->id = hot_plug_info->id;
-		phy_info->attached.phys_disk_num = ~0;
-		mptsas_reprobe_target(starget, 0);
-		mptsas_add_device_component_by_fw(ioc,
-		    hot_plug_info->channel, hot_plug_info->id);
+		rphy->identify = identify;
+		if (sas_rphy_add(rphy)) {
+			dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+			    "%s: exit at line=%d\n", ioc->name,
+			    __func__, __LINE__));
+			sas_rphy_free(rphy);
+			break;
+		}
+		mptsas_set_rphy(ioc, phy_info, rphy);
 		break;
-
 	case MPTSAS_ADD_RAID:
-
+		sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
+		    ev->id, 0);
+		if (sdev) {
+			scsi_device_put(sdev);
+			break;
+		}
+		printk(MYIOC_s_INFO_FMT
+		       "attaching raid volume, channel %d, id %d\n",
+		       ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
+		scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, ev->id, 0);
 		mpt_findImVolumes(ioc);
-		printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
-		    "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
-		    hot_plug_info->id);
-		scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
-		    hot_plug_info->id, 0);
 		break;
-
 	case MPTSAS_DEL_RAID:
-
+		sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
+		    ev->id, 0);
+		if (!sdev)
+			break;
+		printk(MYIOC_s_INFO_FMT
+		       "removing raid volume, channel %d, id %d\n",
+		       ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
+		vdevice = sdev->hostdata;
+		scsi_remove_device(sdev);
+		scsi_device_put(sdev);
 		mpt_findImVolumes(ioc);
-		printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
-		    "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
-		    hot_plug_info->id);
-		scsi_remove_device(hot_plug_info->sdev);
-		scsi_device_put(hot_plug_info->sdev);
 		break;
-
 	case MPTSAS_ADD_INACTIVE_VOLUME:
-
-		mpt_findImVolumes(ioc);
 		mptsas_adding_inactive_raid_components(ioc,
-		    hot_plug_info->channel, hot_plug_info->id);
+		    ev->channel, ev->id);
 		break;
-
+	case MPTSAS_IGNORE_EVENT:
 	default:
 		break;
 	}
 
-	mptsas_free_fw_event(ioc, fw_event);
+	mutex_unlock(&ioc->sas_discovery_mutex);
+	kfree(ev);
 }
 
 static void
-mptsas_send_sas_event(struct fw_event_work *fw_event)
+mptsas_send_sas_event(MPT_ADAPTER *ioc,
+		EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data)
 {
-	MPT_ADAPTER *ioc;
-	struct mptsas_hotplug_event hot_plug_info;
-	EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
-	u32 device_info;
-	u64 sas_address;
-
-	ioc = fw_event->ioc;
-	sas_event_data = (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)
-	    fw_event->event_data;
-	device_info = le32_to_cpu(sas_event_data->DeviceInfo);
+	struct mptsas_hotplug_event *ev;
+	u32 device_info = le32_to_cpu(sas_event_data->DeviceInfo);
+	__le64 sas_address;
 
 	if ((device_info &
-	    (MPI_SAS_DEVICE_INFO_SSP_TARGET |
-	    MPI_SAS_DEVICE_INFO_STP_TARGET |
-	    MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0) {
-		mptsas_free_fw_event(ioc, fw_event);
-		return;
-	}
-
-	if (sas_event_data->ReasonCode ==
-	    MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED) {
-		mptbase_sas_persist_operation(ioc,
-		    MPI_SAS_OP_CLEAR_NOT_PRESENT);
-		mptsas_free_fw_event(ioc, fw_event);
+	     (MPI_SAS_DEVICE_INFO_SSP_TARGET |
+	      MPI_SAS_DEVICE_INFO_STP_TARGET |
+	      MPI_SAS_DEVICE_INFO_SATA_DEVICE )) == 0)
 		return;
-	}
 
 	switch (sas_event_data->ReasonCode) {
 	case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
+
+		mptsas_target_reset_queue(ioc, sas_event_data);
+		break;
+
 	case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
-		memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
-		hot_plug_info.handle = le16_to_cpu(sas_event_data->DevHandle);
-		hot_plug_info.channel = sas_event_data->Bus;
-		hot_plug_info.id = sas_event_data->TargetID;
-		hot_plug_info.phy_id = sas_event_data->PhyNum;
+		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
+		if (!ev) {
+			printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name);
+			break;
+		}
+
+		INIT_WORK(&ev->work, mptsas_hotplug_work);
+		ev->ioc = ioc;
+		ev->handle = le16_to_cpu(sas_event_data->DevHandle);
+		ev->parent_handle =
+		    le16_to_cpu(sas_event_data->ParentDevHandle);
+		ev->channel = sas_event_data->Bus;
+		ev->id = sas_event_data->TargetID;
+		ev->phy_id = sas_event_data->PhyNum;
 		memcpy(&sas_address, &sas_event_data->SASAddress,
-		    sizeof(u64));
-		hot_plug_info.sas_address = le64_to_cpu(sas_address);
-		hot_plug_info.device_info = device_info;
+		    sizeof(__le64));
+		ev->sas_address = le64_to_cpu(sas_address);
+		ev->device_info = device_info;
+
 		if (sas_event_data->ReasonCode &
 		    MPI_EVENT_SAS_DEV_STAT_RC_ADDED)
-			hot_plug_info.event_type = MPTSAS_ADD_DEVICE;
+			ev->event_type = MPTSAS_ADD_DEVICE;
 		else
-			hot_plug_info.event_type = MPTSAS_DEL_DEVICE;
-		mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
+			ev->event_type = MPTSAS_DEL_DEVICE;
+		schedule_work(&ev->work);
 		break;
-
 	case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
-		mptbase_sas_persist_operation(ioc,
-		    MPI_SAS_OP_CLEAR_NOT_PRESENT);
-		mptsas_free_fw_event(ioc, fw_event);
+	/*
+	 * Persistent table is full.
+	 */
+		INIT_WORK(&ioc->sas_persist_task,
+		    mptsas_persist_clear_table);
+		schedule_work(&ioc->sas_persist_task);
 		break;
-
+	/*
+	 * TODO, handle other events
+	 */
 	case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
-		/* TODO */
+	case MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
 	case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
-		/* TODO */
+	case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
+	case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
+	case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
+	case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
 	default:
-		mptsas_free_fw_event(ioc, fw_event);
 		break;
 	}
 }
-
 static void
-mptsas_send_raid_event(struct fw_event_work *fw_event)
+mptsas_send_raid_event(MPT_ADAPTER *ioc,
+		EVENT_DATA_RAID *raid_event_data)
 {
-	MPT_ADAPTER *ioc;
-	EVENT_DATA_RAID *raid_event_data;
-	struct mptsas_hotplug_event hot_plug_info;
-	int status;
-	int state;
-	struct scsi_device *sdev = NULL;
-	VirtDevice *vdevice = NULL;
-	RaidPhysDiskPage0_t phys_disk;
-
-	ioc = fw_event->ioc;
-	raid_event_data = (EVENT_DATA_RAID *)fw_event->event_data;
-	status = le32_to_cpu(raid_event_data->SettingsStatus);
-	state = (status >> 8) & 0xff;
-
-	memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
-	hot_plug_info.id = raid_event_data->VolumeID;
-	hot_plug_info.channel = raid_event_data->VolumeBus;
-	hot_plug_info.phys_disk_num = raid_event_data->PhysDiskNum;
-
-	if (raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_DELETED ||
-	    raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_CREATED ||
-	    raid_event_data->ReasonCode ==
-	    MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED) {
-		sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
-		    hot_plug_info.id, 0);
-		hot_plug_info.sdev = sdev;
-		if (sdev)
-			vdevice = sdev->hostdata;
+	struct mptsas_hotplug_event *ev;
+	int status = le32_to_cpu(raid_event_data->SettingsStatus);
+	int state = (status >> 8) & 0xff;
+
+	if (ioc->bus_type != SAS)
+		return;
+
+	ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
+	if (!ev) {
+		printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name);
+		return;
 	}
 
-	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
-	    "ReasonCode=%02x\n", ioc->name, __func__,
-	    raid_event_data->ReasonCode));
+	INIT_WORK(&ev->work, mptsas_hotplug_work);
+	ev->ioc = ioc;
+	ev->id = raid_event_data->VolumeID;
+	ev->channel = raid_event_data->VolumeBus;
+	ev->event_type = MPTSAS_IGNORE_EVENT;
 
 	switch (raid_event_data->ReasonCode) {
 	case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
-		hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK_REPROBE;
+		ev->phys_disk_num_valid = 1;
+		ev->phys_disk_num = raid_event_data->PhysDiskNum;
+		ev->event_type = MPTSAS_ADD_DEVICE;
 		break;
 	case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
-		hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK_REPROBE;
+		ev->phys_disk_num_valid = 1;
+		ev->phys_disk_num = raid_event_data->PhysDiskNum;
+		ev->hidden_raid_component = 1;
+		ev->event_type = MPTSAS_DEL_DEVICE;
 		break;
 	case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
 		switch (state) {
 		case MPI_PD_STATE_ONLINE:
 		case MPI_PD_STATE_NOT_COMPATIBLE:
-			mpt_raid_phys_disk_pg0(ioc,
-			    raid_event_data->PhysDiskNum, &phys_disk);
-			hot_plug_info.id = phys_disk.PhysDiskID;
-			hot_plug_info.channel = phys_disk.PhysDiskBus;
-			hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
+			ev->phys_disk_num_valid = 1;
+			ev->phys_disk_num = raid_event_data->PhysDiskNum;
+			ev->hidden_raid_component = 1;
+			ev->event_type = MPTSAS_ADD_DEVICE;
 			break;
-		case MPI_PD_STATE_FAILED:
 		case MPI_PD_STATE_MISSING:
 		case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST:
 		case MPI_PD_STATE_FAILED_AT_HOST_REQUEST:
 		case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON:
-			hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
+			ev->phys_disk_num_valid = 1;
+			ev->phys_disk_num = raid_event_data->PhysDiskNum;
+			ev->event_type = MPTSAS_DEL_DEVICE;
 			break;
 		default:
 			break;
 		}
 		break;
 	case MPI_EVENT_RAID_RC_VOLUME_DELETED:
-		if (!sdev)
-			break;
-		vdevice->vtarget->deleted = 1; /* block IO */
-		hot_plug_info.event_type = MPTSAS_DEL_RAID;
+		ev->event_type = MPTSAS_DEL_RAID;
 		break;
 	case MPI_EVENT_RAID_RC_VOLUME_CREATED:
-		if (sdev) {
-			scsi_device_put(sdev);
-			break;
-		}
-		hot_plug_info.event_type = MPTSAS_ADD_RAID;
+		ev->event_type = MPTSAS_ADD_RAID;
 		break;
 	case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
-		if (!(status & MPI_RAIDVOL0_STATUS_FLAG_ENABLED)) {
-			if (!sdev)
-				break;
-			vdevice->vtarget->deleted = 1; /* block IO */
-			hot_plug_info.event_type = MPTSAS_DEL_RAID;
-			break;
-		}
 		switch (state) {
 		case MPI_RAIDVOL0_STATUS_STATE_FAILED:
 		case MPI_RAIDVOL0_STATUS_STATE_MISSING:
-			if (!sdev)
-				break;
-			vdevice->vtarget->deleted = 1; /* block IO */
-			hot_plug_info.event_type = MPTSAS_DEL_RAID;
+			ev->event_type = MPTSAS_DEL_RAID;
 			break;
 		case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
 		case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
-			if (sdev) {
-				scsi_device_put(sdev);
-				break;
-			}
-			hot_plug_info.event_type = MPTSAS_ADD_RAID;
+			ev->event_type = MPTSAS_ADD_RAID;
 			break;
 		default:
 			break;
@@ -4335,188 +3001,32 @@ mptsas_send_raid_event(struct fw_event_work *fw_event)
 	default:
 		break;
 	}
-
-	if (hot_plug_info.event_type != MPTSAS_IGNORE_EVENT)
-		mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
-	else
-		mptsas_free_fw_event(ioc, fw_event);
-}
-
-/**
- * mptsas_issue_tm - send mptsas internal tm request
- * @ioc: Pointer to MPT_ADAPTER structure
- * @type: Task Management type
- * @channel: channel number for task management
- * @id: Logical Target ID for reset (if appropriate)
- * @lun: Logical unit for reset (if appropriate)
- * @task_context: Context for the task to be aborted
- * @timeout: timeout for task management control
- *
- * return 0 on success and -1 on failure:
- *
- */
-static int
-mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
-	int task_context, ulong timeout, u8 *issue_reset)
-{
-	MPT_FRAME_HDR *mf;
-	SCSITaskMgmt_t *pScsiTm;
-	int retval;
-	unsigned long timeleft;
-
-	*issue_reset = 0;
-	mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
-	if (mf == NULL) {
-		retval = -1; /* return failure */
-		dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "TaskMgmt request: no "
-		    "msg frames!!\n", ioc->name));
-		goto out;
-	}
-
-	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request: mr = %p, "
-	    "task_type = 0x%02X,\n\t timeout = %ld, fw_channel = %d, "
-	    "fw_id = %d, lun = %lld,\n\t task_context = 0x%x\n", ioc->name, mf,
-	    type, timeout, channel, id, (unsigned long long)lun,
-	    task_context));
-
-	pScsiTm = (SCSITaskMgmt_t *) mf;
-	memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
-	pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
-	pScsiTm->TaskType = type;
-	pScsiTm->MsgFlags = 0;
-	pScsiTm->TargetID = id;
-	pScsiTm->Bus = channel;
-	pScsiTm->ChainOffset = 0;
-	pScsiTm->Reserved = 0;
-	pScsiTm->Reserved1 = 0;
-	pScsiTm->TaskMsgContext = task_context;
-	int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN);
-
-	INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
-	CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
-	retval = 0;
-	mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
-
-	/* Now wait for the command to complete */
-	timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
-	    timeout*HZ);
-	if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
-		retval = -1; /* return failure */
-		dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
-		    "TaskMgmt request: TIMED OUT!(mr=%p)\n", ioc->name, mf));
-		mpt_free_msg_frame(ioc, mf);
-		if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
-			goto out;
-		*issue_reset = 1;
-		goto out;
-	}
-
-	if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
-		retval = -1; /* return failure */
-		dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-		    "TaskMgmt request: failed with no reply\n", ioc->name));
-		goto out;
-	}
-
- out:
-	CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
-	return retval;
+	schedule_work(&ev->work);
 }
 
-/**
- * mptsas_broadcast_primative_work - Handle broadcast primitives
- * @work: work queue payload containing info describing the event
- *
- * this will be handled in workqueue context.
- */ static void -mptsas_broadcast_primative_work(struct fw_event_work *fw_event) +mptsas_send_discovery_event(MPT_ADAPTER *ioc, + EVENT_DATA_SAS_DISCOVERY *discovery_data) { - MPT_ADAPTER *ioc = fw_event->ioc; - MPT_FRAME_HDR *mf; - VirtDevice *vdevice; - int ii; - struct scsi_cmnd *sc; - SCSITaskMgmtReply_t *pScsiTmReply; - u8 issue_reset; - int task_context; - u8 channel, id; - int lun; - u32 termination_count; - u32 query_count; - - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s - enter\n", ioc->name, __func__)); - - mutex_lock(&ioc->taskmgmt_cmds.mutex); - if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { - mutex_unlock(&ioc->taskmgmt_cmds.mutex); - mptsas_requeue_fw_event(ioc, fw_event, 1000); - return; - } - - issue_reset = 0; - termination_count = 0; - query_count = 0; - mpt_findImVolumes(ioc); - pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply; - - for (ii = 0; ii < ioc->req_depth; ii++) { - if (ioc->fw_events_off) - goto out; - sc = mptscsih_get_scsi_lookup(ioc, ii); - if (!sc) - continue; - mf = MPT_INDEX_2_MFPTR(ioc, ii); - if (!mf) - continue; - task_context = mf->u.frame.hwhdr.msgctxu.MsgContext; - vdevice = sc->device->hostdata; - if (!vdevice || !vdevice->vtarget) - continue; - if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) - continue; /* skip hidden raid components */ - if (vdevice->vtarget->raidVolume) - continue; /* skip hidden raid components */ - channel = vdevice->vtarget->channel; - id = vdevice->vtarget->id; - lun = vdevice->lun; - if (mptsas_issue_tm(ioc, MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK, - channel, id, (u64)lun, task_context, 30, &issue_reset)) - goto out; - query_count++; - termination_count += - le32_to_cpu(pScsiTmReply->TerminationCount); - if ((pScsiTmReply->IOCStatus == MPI_IOCSTATUS_SUCCESS) && - (pScsiTmReply->ResponseCode == - MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED || - pScsiTmReply->ResponseCode == - MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC)) - continue; - if (mptsas_issue_tm(ioc, - MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, - channel, id, (u64)lun, 0, 30, &issue_reset)) - goto out; - termination_count += - le32_to_cpu(pScsiTmReply->TerminationCount); - } + struct mptsas_discovery_event *ev; - out: - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s - exit, query_count = %d termination_count = %d\n", - ioc->name, __func__, query_count, termination_count)); - - ioc->broadcast_aen_busy = 0; - mpt_clear_taskmgmt_in_progress_flag(ioc); - mutex_unlock(&ioc->taskmgmt_cmds.mutex); + /* + * DiscoveryStatus + * + * This flag will be non-zero when firmware + * kicks off discovery, and return to zero + * once its completed. 
+ */ + if (discovery_data->DiscoveryStatus) + return; - if (issue_reset) { - printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", - ioc->name, __func__); - mpt_HardResetHandler(ioc, CAN_SLEEP); - } - mptsas_free_fw_event(ioc, fw_event); -} + ev = kzalloc(sizeof(*ev), GFP_ATOMIC); + if (!ev) + return; + INIT_WORK(&ev->work, mptsas_discovery_work); + ev->ioc = ioc; + schedule_work(&ev->work); +}; /* * mptsas_send_ir2_event - handle exposing hidden disk when @@ -4527,159 +3037,76 @@ mptsas_broadcast_primative_work(struct fw_event_work *fw_event) * */ static void -mptsas_send_ir2_event(struct fw_event_work *fw_event) +mptsas_send_ir2_event(MPT_ADAPTER *ioc, PTR_MPI_EVENT_DATA_IR2 ir2_data) { - MPT_ADAPTER *ioc; - struct mptsas_hotplug_event hot_plug_info; - MPI_EVENT_DATA_IR2 *ir2_data; - u8 reasonCode; - RaidPhysDiskPage0_t phys_disk; - - ioc = fw_event->ioc; - ir2_data = (MPI_EVENT_DATA_IR2 *)fw_event->event_data; - reasonCode = ir2_data->ReasonCode; - - devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: " - "ReasonCode=%02x\n", ioc->name, __func__, reasonCode)); - - memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event)); - hot_plug_info.id = ir2_data->TargetID; - hot_plug_info.channel = ir2_data->Bus; - switch (reasonCode) { - case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED: - hot_plug_info.event_type = MPTSAS_ADD_INACTIVE_VOLUME; - break; - case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED: - hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum; - hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK; - break; - case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED: - hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum; - mpt_raid_phys_disk_pg0(ioc, - ir2_data->PhysDiskNum, &phys_disk); - hot_plug_info.id = phys_disk.PhysDiskID; - hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK; - break; - default: - mptsas_free_fw_event(ioc, fw_event); + struct mptsas_hotplug_event *ev; + + if (ir2_data->ReasonCode != + MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED) + return; + + ev = kzalloc(sizeof(*ev), GFP_ATOMIC); + if (!ev) return; - } - mptsas_hotplug_work(ioc, fw_event, &hot_plug_info); -} + + INIT_WORK(&ev->work, mptsas_hotplug_work); + ev->ioc = ioc; + ev->id = ir2_data->TargetID; + ev->channel = ir2_data->Bus; + ev->event_type = MPTSAS_ADD_INACTIVE_VOLUME; + + schedule_work(&ev->work); +}; static int mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply) { - u32 event = le32_to_cpu(reply->Event); - int sz, event_data_sz; - struct fw_event_work *fw_event; - unsigned long delay; + int rc=1; + u8 event = le32_to_cpu(reply->Event) & 0xFF; - /* events turned off due to host reset or driver unloading */ - if (ioc->fw_events_off) - return 0; + if (!ioc->sh) + goto out; - delay = msecs_to_jiffies(1); - switch (event) { - case MPI_EVENT_SAS_BROADCAST_PRIMITIVE: - { - EVENT_DATA_SAS_BROADCAST_PRIMITIVE *broadcast_event_data = - (EVENT_DATA_SAS_BROADCAST_PRIMITIVE *)reply->Data; - if (broadcast_event_data->Primitive != - MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT) - return 0; - if (ioc->broadcast_aen_busy) - return 0; - ioc->broadcast_aen_busy = 1; - break; + /* + * sas_discovery_ignore_events + * + * This flag is to prevent anymore processing of + * sas events once mptsas_remove function is called. 
+ */ + if (ioc->sas_discovery_ignore_events) { + rc = mptscsih_event_process(ioc, reply); + goto out; } + + switch (event) { case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: - { - EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data = - (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data; - - if (sas_event_data->ReasonCode == - MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING) { - mptsas_target_reset_queue(ioc, sas_event_data); - return 0; - } - break; - } - case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE: - { - MpiEventDataSasExpanderStatusChange_t *expander_data = - (MpiEventDataSasExpanderStatusChange_t *)reply->Data; - - if (ioc->old_sas_discovery_protocal) - return 0; - - if (expander_data->ReasonCode == - MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING && - ioc->device_missing_delay) - delay = HZ * ioc->device_missing_delay; + mptsas_send_sas_event(ioc, + (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data); break; - } - case MPI_EVENT_SAS_DISCOVERY: - { - u32 discovery_status; - EventDataSasDiscovery_t *discovery_data = - (EventDataSasDiscovery_t *)reply->Data; - - discovery_status = le32_to_cpu(discovery_data->DiscoveryStatus); - ioc->sas_discovery_quiesce_io = discovery_status ? 1 : 0; - if (ioc->old_sas_discovery_protocal && !discovery_status) - mptsas_queue_rescan(ioc); - return 0; - } case MPI_EVENT_INTEGRATED_RAID: + mptsas_send_raid_event(ioc, + (EVENT_DATA_RAID *)reply->Data); + break; case MPI_EVENT_PERSISTENT_TABLE_FULL: + INIT_WORK(&ioc->sas_persist_task, + mptsas_persist_clear_table); + schedule_work(&ioc->sas_persist_task); + break; + case MPI_EVENT_SAS_DISCOVERY: + mptsas_send_discovery_event(ioc, + (EVENT_DATA_SAS_DISCOVERY *)reply->Data); + break; case MPI_EVENT_IR2: - case MPI_EVENT_SAS_PHY_LINK_STATUS: - case MPI_EVENT_QUEUE_FULL: + mptsas_send_ir2_event(ioc, + (PTR_MPI_EVENT_DATA_IR2)reply->Data); break; default: - return 0; - } - - event_data_sz = ((reply->MsgLength * 4) - - offsetof(EventNotificationReply_t, Data)); - sz = offsetof(struct fw_event_work, event_data) + event_data_sz; - fw_event = kzalloc(sz, GFP_ATOMIC); - if (!fw_event) { - printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", ioc->name, - __func__, __LINE__); - return 0; + rc = mptscsih_event_process(ioc, reply); + break; } - memcpy(fw_event->event_data, reply->Data, event_data_sz); - fw_event->event = event; - fw_event->ioc = ioc; - mptsas_add_fw_event(ioc, fw_event, delay); - return 0; -} - -/* Delete a volume when no longer listed in ioc pg2 - */ -static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id) -{ - struct scsi_device *sdev; - int i; - - sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, id, 0); - if (!sdev) - return; - if (!ioc->raid_data.pIocPg2) - goto out; - if (!ioc->raid_data.pIocPg2->NumActiveVolumes) - goto out; - for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) - if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id) - goto release_sdev; out: - printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, " - "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, id); - scsi_remove_device(sdev); - release_sdev: - scsi_device_put(sdev); + + return rc; } static int @@ -4701,7 +3128,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) return r; ioc = pci_get_drvdata(pdev); - mptsas_fw_event_off(ioc); ioc->DoneCtx = mptsasDoneCtx; ioc->TaskCtx = mptsasTaskCtx; ioc->InternalCtx = mptsasInternalCtx; @@ -4785,15 +3211,17 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) * A slightly different algorithm is required for * 64bit SGEs. 
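The probe changes just below recompute scatter-gather capacity inline: one SGE costs a 32-bit flags/length word plus a 32- or 64-bit address, `scale` is how many fit in a request frame, and the chain depth multiplies that out. A standalone rendering of the same arithmetic, with the frame size and chain depth picked arbitrarily (the 60- and 64-byte offsets mirror the driver's constants):

#include <stdint.h>
#include <stdio.h>

/* numSGE as computed in mptsas_probe(); req_sz and max_chain_depth are
 * example values here, not ones read from real IOC facts. */
static int num_sge(int req_sz, int max_chain_depth, int addr_size)
{
	int sge_size = addr_size + (int)sizeof(uint32_t);	/* flags/len + address */
	int scale = req_sz / sge_size;		/* simple SGEs per request frame */

	if (addr_size == 8)			/* 64-bit SGEs */
		return (scale - 1) * (max_chain_depth - 1) + scale +
		       (req_sz - 60) / sge_size;
	return 1 + (scale - 1) * (max_chain_depth - 1) + scale +	/* 32-bit SGEs */
	       (req_sz - 64) / sge_size;
}

int main(void)
{
	printf("64-bit addressing: %d SGEs\n", num_sge(128, 8, 8));
	printf("32-bit addressing: %d SGEs\n", num_sge(128, 8, 4));
	return 0;
}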
*/ - scale = ioc->req_sz/ioc->SGE_size; - if (ioc->sg_addr_size == sizeof(u64)) { + scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32)); + if (sizeof(dma_addr_t) == sizeof(u64)) { numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + - (ioc->req_sz - 60) / ioc->SGE_size; + (ioc->req_sz - 60) / (sizeof(dma_addr_t) + + sizeof(u32)); } else { numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale + - (ioc->req_sz - 64) / ioc->SGE_size; + (ioc->req_sz - 64) / (sizeof(dma_addr_t) + + sizeof(u32)); } if (numSGE < sh->sg_tablesize) { @@ -4823,6 +3251,9 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Clear the TM flags */ + hd->tmPending = 0; + hd->tmState = TM_STATE_NONE; + hd->resetPending = 0; hd->abortSCpnt = NULL; /* Clear the pointer used to store @@ -4842,11 +3273,10 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc->sas_data.ptClear = mpt_pt_clear; + init_waitqueue_head(&hd->scandv_waitq); + hd->scandv_wait_done = 0; hd->last_queue_full = 0; INIT_LIST_HEAD(&hd->target_reset_list); - INIT_LIST_HEAD(&ioc->sas_device_info_list); - mutex_init(&ioc->sas_device_info_mutex); - spin_unlock_irqrestore(&ioc->FreeQlock, flags); if (ioc->sas_data.ptClear==1) { @@ -4861,11 +3291,8 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out_mptsas_probe; } - /* older firmware doesn't support expander events */ - if ((ioc->facts.HeaderVersion >> 8) < 0xE) - ioc->old_sas_discovery_protocal = 1; mptsas_scan_sas_topology(ioc); - mptsas_fw_event_on(ioc); + return 0; out_mptsas_probe: @@ -4874,25 +3301,12 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) return error; } -void -mptsas_shutdown(struct pci_dev *pdev) -{ - MPT_ADAPTER *ioc = pci_get_drvdata(pdev); - - mptsas_fw_event_off(ioc); - mptsas_cleanup_fw_event_q(ioc); -} - static void __devexit mptsas_remove(struct pci_dev *pdev) { MPT_ADAPTER *ioc = pci_get_drvdata(pdev); struct mptsas_portinfo *p, *n; int i; - mptsas_shutdown(pdev); - - mptsas_del_device_components(ioc); - ioc->sas_discovery_ignore_events = 1; sas_remove_host(ioc->sh); @@ -4901,12 +3315,11 @@ static void __devexit mptsas_remove(struct pci_dev *pdev) list_del(&p->list); for (i = 0 ; i < p->num_phys ; i++) mptsas_port_delete(ioc, p->phy_info[i].port_details); - kfree(p->phy_info); kfree(p); } mutex_unlock(&ioc->sas_topology_mutex); - ioc->hba_port_info = NULL; + mptscsih_remove(pdev); } @@ -4931,7 +3344,7 @@ static struct pci_driver mptsas_driver = { .id_table = mptsas_pci_table, .probe = mptsas_probe, .remove = __devexit_p(mptsas_remove), - .shutdown = mptsas_shutdown, + .shutdown = mptscsih_shutdown, #ifdef CONFIG_PM .suspend = mptscsih_suspend, .resume = mptscsih_resume, @@ -4951,12 +3364,10 @@ mptsas_init(void) return -ENODEV; mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER); - mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER); + mptsasTaskCtx = mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER); mptsasInternalCtx = mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER); mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER); - mptsasDeviceResetCtx = - mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER); mpt_event_register(mptsasDoneCtx, mptsas_event_process); mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset); @@ -4981,7 +3392,6 @@ mptsas_exit(void) mpt_deregister(mptsasInternalCtx); mpt_deregister(mptsasTaskCtx); mpt_deregister(mptsasDoneCtx); - mpt_deregister(mptsasDeviceResetCtx); } module_init(mptsas_init); diff --git 
a/trunk/drivers/message/fusion/mptsas.h b/trunk/drivers/message/fusion/mptsas.h index 953c2bfcf6aa..2b544e0877e6 100644 --- a/trunk/drivers/message/fusion/mptsas.h +++ b/trunk/drivers/message/fusion/mptsas.h @@ -53,7 +53,6 @@ struct mptsas_target_reset_event { struct list_head list; EVENT_DATA_SAS_DEVICE_STATUS_CHANGE sas_event_data; u8 target_reset_issued; - unsigned long time_count; }; enum mptsas_hotplug_action { @@ -61,37 +60,12 @@ enum mptsas_hotplug_action { MPTSAS_DEL_DEVICE, MPTSAS_ADD_RAID, MPTSAS_DEL_RAID, - MPTSAS_ADD_PHYSDISK, - MPTSAS_ADD_PHYSDISK_REPROBE, - MPTSAS_DEL_PHYSDISK, - MPTSAS_DEL_PHYSDISK_REPROBE, MPTSAS_ADD_INACTIVE_VOLUME, MPTSAS_IGNORE_EVENT, }; -struct mptsas_mapping{ - u8 id; - u8 channel; -}; - -struct mptsas_device_info { - struct list_head list; - struct mptsas_mapping os; /* operating system mapping*/ - struct mptsas_mapping fw; /* firmware mapping */ - u64 sas_address; - u32 device_info; /* specific bits for devices */ - u16 slot; /* enclosure slot id */ - u64 enclosure_logical_id; /*enclosure address */ - u8 is_logical_volume; /* is this logical volume */ - /* this belongs to volume */ - u8 is_hidden_raid_component; - /* this valid when is_hidden_raid_component set */ - u8 volume_id; - /* cached data for a removed device */ - u8 is_cached; -}; - struct mptsas_hotplug_event { + struct work_struct work; MPT_ADAPTER *ioc; enum mptsas_hotplug_action event_type; u64 sas_address; @@ -99,18 +73,11 @@ struct mptsas_hotplug_event { u8 id; u32 device_info; u16 handle; + u16 parent_handle; u8 phy_id; + u8 phys_disk_num_valid; /* hrc (hidden raid component) */ u8 phys_disk_num; /* hrc - unique index*/ - struct scsi_device *sdev; -}; - -struct fw_event_work { - struct list_head list; - struct delayed_work work; - MPT_ADAPTER *ioc; - u32 event; - u8 retries; - u8 event_data[1]; + u8 hidden_raid_component; /* hrc - don't expose*/ }; struct mptsas_discovery_event { diff --git a/trunk/drivers/message/fusion/mptscsih.c b/trunk/drivers/message/fusion/mptscsih.c index 024e8305bcf2..e62c6bc4ad33 100644 --- a/trunk/drivers/message/fusion/mptscsih.c +++ b/trunk/drivers/message/fusion/mptscsih.c @@ -80,7 +80,7 @@ MODULE_VERSION(my_VERSION); /* * Other private/forward protos... 
*/ -struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i); +static struct scsi_cmnd * mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i); static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i); static void mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd); static int SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd); @@ -92,24 +92,18 @@ static int mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt, SCSIIORequest_t *pReq, int req_idx); static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx); static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply); +static int mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd); +static int mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout ); -int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, - int lun, int ctx2abort, ulong timeout); +static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout); int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset); int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply); -void -mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code); -static int mptscsih_get_completion_code(MPT_ADAPTER *ioc, - MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply); int mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r); static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd); static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice); -static int -mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type, - SCSITaskMgmtReply_t *pScsiTmReply); void mptscsih_remove(struct pci_dev *); void mptscsih_shutdown(struct pci_dev *); #ifdef CONFIG_PM @@ -119,6 +113,69 @@ int mptscsih_resume(struct pci_dev *pdev); #define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptscsih_add_sge - Place a simple SGE at address pAddr. + * @pAddr: virtual address for SGE + * @flagslength: SGE flags and data transfer length + * @dma_addr: Physical address + * + * This routine places a MPT request frame back on the MPT adapter's + * FreeQ. + */ +static inline void +mptscsih_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr) +{ + if (sizeof(dma_addr_t) == sizeof(u64)) { + SGESimple64_t *pSge = (SGESimple64_t *) pAddr; + u32 tmp = dma_addr & 0xFFFFFFFF; + + pSge->FlagsLength = cpu_to_le32(flagslength); + pSge->Address.Low = cpu_to_le32(tmp); + tmp = (u32) ((u64)dma_addr >> 32); + pSge->Address.High = cpu_to_le32(tmp); + + } else { + SGESimple32_t *pSge = (SGESimple32_t *) pAddr; + pSge->FlagsLength = cpu_to_le32(flagslength); + pSge->Address = cpu_to_le32(dma_addr); + } +} /* mptscsih_add_sge() */ + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptscsih_add_chain - Place a chain SGE at address pAddr. + * @pAddr: virtual address for SGE + * @next: nextChainOffset value (u32's) + * @length: length of next SGL segment + * @dma_addr: Physical address + * + * This routine places a MPT request frame back on the MPT adapter's + * FreeQ. 
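mptscsih_add_sge() above branches on sizeof(dma_addr_t) and, for 64-bit addressing, stores the bus address as two 32-bit words. A host-order sketch of that split; the driver additionally byte-swaps each word with cpu_to_le32(), which is omitted here:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Split a 64-bit bus address into the two 32-bit words a SGESimple64
 * carries; the driver then cpu_to_le32()-converts each word. */
static void split_dma_addr(uint64_t dma, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(dma & 0xFFFFFFFFu);
	*hi = (uint32_t)(dma >> 32);
}

int main(void)
{
	uint32_t lo, hi;

	split_dma_addr(0x0000000123abc000ull, &lo, &hi);
	printf("low=0x%08" PRIx32 " high=0x%08" PRIx32 "\n", lo, hi);
	return 0;
}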
+ */ +static inline void +mptscsih_add_chain(char *pAddr, u8 next, u16 length, dma_addr_t dma_addr) +{ + if (sizeof(dma_addr_t) == sizeof(u64)) { + SGEChain64_t *pChain = (SGEChain64_t *) pAddr; + u32 tmp = dma_addr & 0xFFFFFFFF; + + pChain->Length = cpu_to_le16(length); + pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size(); + + pChain->NextChainOffset = next; + + pChain->Address.Low = cpu_to_le32(tmp); + tmp = (u32) ((u64)dma_addr >> 32); + pChain->Address.High = cpu_to_le32(tmp); + } else { + SGEChain32_t *pChain = (SGEChain32_t *) pAddr; + pChain->Length = cpu_to_le16(length); + pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size(); + pChain->NextChainOffset = next; + pChain->Address = cpu_to_le32(dma_addr); + } +} /* mptscsih_add_chain() */ /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /* @@ -224,10 +281,10 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt, */ nextSGEset: - numSgeSlots = ((frm_sz - sgeOffset) / ioc->SGE_size); + numSgeSlots = ((frm_sz - sgeOffset) / (sizeof(u32) + sizeof(dma_addr_t)) ); numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots; - sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | sgdir; + sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | MPT_SGE_FLAGS_ADDRESSING | sgdir; /* Get first (num - 1) SG elements * Skip any SG entries with a length of 0 @@ -236,19 +293,17 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt, for (ii=0; ii < (numSgeThisFrame-1); ii++) { thisxfer = sg_dma_len(sg); if (thisxfer == 0) { - /* Get next SG element from the OS */ - sg = sg_next(sg); + sg = sg_next(sg); /* Get next SG element from the OS */ sg_done++; continue; } v2 = sg_dma_address(sg); - ioc->add_sge(psge, sgflags | thisxfer, v2); + mptscsih_add_sge(psge, sgflags | thisxfer, v2); - /* Get next SG element from the OS */ - sg = sg_next(sg); - psge += ioc->SGE_size; - sgeOffset += ioc->SGE_size; + sg = sg_next(sg); /* Get next SG element from the OS */ + psge += (sizeof(u32) + sizeof(dma_addr_t)); + sgeOffset += (sizeof(u32) + sizeof(dma_addr_t)); sg_done++; } @@ -265,8 +320,12 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt, thisxfer = sg_dma_len(sg); v2 = sg_dma_address(sg); - ioc->add_sge(psge, sgflags | thisxfer, v2); - sgeOffset += ioc->SGE_size; + mptscsih_add_sge(psge, sgflags | thisxfer, v2); + /* + sg = sg_next(sg); + psge += (sizeof(u32) + sizeof(dma_addr_t)); + */ + sgeOffset += (sizeof(u32) + sizeof(dma_addr_t)); sg_done++; if (chainSge) { @@ -275,8 +334,7 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt, * Update the chain element * Offset and Length fields. */ - ioc->add_chain((char *)chainSge, 0, sgeOffset, - ioc->ChainBufferDMA + chain_dma_off); + mptscsih_add_chain((char *)chainSge, 0, sgeOffset, ioc->ChainBufferDMA + chain_dma_off); } else { /* The current buffer is the original MF * and there is no Chain buffer. @@ -309,7 +367,7 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt, * set properly). */ if (sg_done) { - u32 *ptmp = (u32 *) (psge - ioc->SGE_size); + u32 *ptmp = (u32 *) (psge - (sizeof(u32) + sizeof(dma_addr_t))); sgflags = le32_to_cpu(*ptmp); sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT; *ptmp = cpu_to_le32(sgflags); @@ -323,9 +381,8 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt, * Old chain element is now complete. 
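Two details in the scatter-gather code around here are easy to misread: NextChainOffset is measured in 32-bit words, hence the `sgeOffset >> 2` just below, and the LAST_ELEMENT flag is OR-ed into the FlagsLength word of an SGE that was already written. A small host-order sketch of both (the flag bit is a stand-in for the shifted MPI value):

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SGE_FLAGS_LAST_ELEMENT (1u << 31)	/* stand-in for the shifted MPI flag */

int main(void)
{
	uint32_t frame[32] = { 0 };
	unsigned int sge_offset = 24;	/* byte offset past the last SGE written */

	/* NextChainOffset counts u32 words, so the byte offset must be 4-aligned. */
	assert(sge_offset % 4 == 0);
	uint8_t next_chain = (uint8_t)(sge_offset >> 2);

	/* Retroactively mark the previously written SGE as the last element:
	 * read its FlagsLength word back, OR in the flag, store it again. */
	frame[2] = 0x00001000;		/* pretend FlagsLength: length 0x1000 */
	frame[2] |= SGE_FLAGS_LAST_ELEMENT;

	printf("next_chain=%u words, flags=0x%08" PRIx32 "\n",
	       (unsigned int)next_chain, frame[2]);
	return 0;
}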
*/ u8 nextChain = (u8) (sgeOffset >> 2); - sgeOffset += ioc->SGE_size; - ioc->add_chain((char *)chainSge, nextChain, sgeOffset, - ioc->ChainBufferDMA + chain_dma_off); + sgeOffset += (sizeof(u32) + sizeof(dma_addr_t)); + mptscsih_add_chain((char *)chainSge, nextChain, sgeOffset, ioc->ChainBufferDMA + chain_dma_off); } else { /* The original MF buffer requires a chain buffer - * set the offset. @@ -535,15 +592,14 @@ mptscsih_info_scsiio(MPT_ADAPTER *ioc, struct scsi_cmnd *sc, SCSIIOReply_t * pSc } scsi_print_command(sc); - printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d, lun = %d\n", - ioc->name, pScsiReply->Bus, pScsiReply->TargetID, sc->device->lun); + printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d\n", + ioc->name, pScsiReply->Bus, pScsiReply->TargetID); printk(MYIOC_s_DEBUG_FMT "\trequest_len = %d, underflow = %d, " "resid = %d\n", ioc->name, scsi_bufflen(sc), sc->underflow, scsi_get_resid(sc)); printk(MYIOC_s_DEBUG_FMT "\ttag = %d, transfer_count = %d, " "sc->result = %08X\n", ioc->name, le16_to_cpu(pScsiReply->TaskTag), le32_to_cpu(pScsiReply->TransferCount), sc->result); - printk(MYIOC_s_DEBUG_FMT "\tiocstatus = %s (0x%04x), " "scsi_status = %s (0x%02x), scsi_state = (0x%02x)\n", ioc->name, desc, ioc_status, desc1, pScsiReply->SCSIStatus, @@ -598,14 +654,16 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); req_idx_MR = (mr != NULL) ? le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx; - - /* Special case, where already freed message frame is received from - * Firmware. It happens with Resetting IOC. - * Return immediately. Do not care - */ if ((req_idx != req_idx_MR) || - (le32_to_cpu(mf->u.frame.linkage.arg1) == 0xdeadbeaf)) + (mf->u.frame.linkage.arg1 == 0xdeadbeaf)) { + printk(MYIOC_s_ERR_FMT "Received a mf that was already freed\n", + ioc->name); + printk (MYIOC_s_ERR_FMT + "req_idx=%x req_idx_MR=%x mf=%p mr=%p sc=%p\n", + ioc->name, req_idx, req_idx_MR, mf, mr, + mptscsih_get_scsi_lookup(ioc, req_idx_MR)); return 0; + } sc = mptscsih_getclear_scsi_lookup(ioc, req_idx); if (sc == NULL) { @@ -752,16 +810,12 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) */ case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ + case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ /* Linux handles an unsolicited DID_RESET better * than an unsolicited DID_ABORT. 
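The completion paths here report transport-level failures by loading a DID_* code into the host byte of sc->result, bits 16-23, as in the `sc->result = DID_RESET << 16` that follows. A self-contained illustration; the DID_RESET value is copied from the era's include/scsi/scsi.h:

#include <stdio.h>

#define DID_RESET 0x08				/* host code from include/scsi/scsi.h */
#define host_byte(result) (((result) >> 16) & 0xff)

int main(void)
{
	unsigned int result = DID_RESET << 16;	/* what the driver stores in sc->result */

	printf("host byte = 0x%02x (%s)\n", host_byte(result),
	       host_byte(result) == DID_RESET ? "DID_RESET" : "other");
	return 0;
}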
*/ sc->result = DID_RESET << 16; - case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ - if (ioc->bus_type == FC) - sc->result = DID_ERROR << 16; - else - sc->result = DID_RESET << 16; break; case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */ @@ -938,9 +992,9 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd) scsi_dma_unmap(sc); sc->result = DID_RESET << 16; sc->host_scribble = NULL; - dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT - "completing cmds: fw_channel %d, fw_id %d, sc=%p, mf = %p, " - "idx=%x\n", ioc->name, channel, id, sc, mf, ii)); + sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT + "completing cmds: fw_channel %d, fw_id %d, sc=%p," + " mf = %p, idx=%x\n", ioc->name, channel, id, sc, mf, ii); sc->scsi_done(sc); } } @@ -999,11 +1053,9 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice) scsi_dma_unmap(sc); sc->host_scribble = NULL; sc->result = DID_NO_CONNECT << 16; - dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device, - MYIOC_s_FMT "completing cmds: fw_channel %d, " - "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name, - vdevice->vtarget->channel, vdevice->vtarget->id, - sc, mf, ii)); + sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT "completing cmds: fw_channel %d," + "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name, vdevice->vtarget->channel, + vdevice->vtarget->id, sc, mf, ii); sc->scsi_done(sc); spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); } @@ -1294,6 +1346,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) MPT_FRAME_HDR *mf; SCSIIORequest_t *pScsiReq; VirtDevice *vdevice = SCpnt->device->hostdata; + int lun; u32 datalen; u32 scsictl; u32 scsidir; @@ -1304,12 +1357,13 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) hd = shost_priv(SCpnt->device->host); ioc = hd->ioc; + lun = SCpnt->device->lun; SCpnt->scsi_done = done; dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n", ioc->name, SCpnt, done)); - if (ioc->taskmgmt_quiesce_io) { + if (hd->resetPending) { dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "qcmd: SCpnt=%p timeout + 60HZ\n", ioc->name, SCpnt)); return SCSI_MLQUEUE_HOST_BUSY; @@ -1368,7 +1422,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) pScsiReq->CDBLength = SCpnt->cmd_len; pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE; pScsiReq->Reserved = 0; - pScsiReq->MsgFlags = mpt_msg_flags(ioc); + pScsiReq->MsgFlags = mpt_msg_flags(); int_to_scsilun(SCpnt->device->lun, (struct scsi_lun *)pScsiReq->LUN); pScsiReq->Control = cpu_to_le32(scsictl); @@ -1394,8 +1448,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) */ if (datalen == 0) { /* Add a NULL SGE */ - ioc->add_sge((char *)&pScsiReq->SGL, - MPT_SGE_FLAGS_SSIMPLE_READ | 0, + mptscsih_add_sge((char *)&pScsiReq->SGL, MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1); } else { /* Add a 32 or 64 bit SGE */ @@ -1475,8 +1528,8 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx) /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** - * mptscsih_IssueTaskMgmt - Generic send Task Management function. - * @hd: Pointer to MPT_SCSI_HOST structure + * mptscsih_TMHandler - Generic handler for SCSI Task Management. 
+ * @hd: Pointer to MPT SCSI HOST structure * @type: Task Management type * @channel: channel number for task management * @id: Logical Target ID for reset (if appropriate) @@ -1484,68 +1537,145 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx) * @ctx2abort: Context for the task to be aborted (if appropriate) * @timeout: timeout for task management control * - * Remark: _HardResetHandler can be invoked from an interrupt thread (timer) - * or a non-interrupt thread. In the former, must not call schedule(). + * Fall through to mpt_HardResetHandler if: not operational, too many + * failed TM requests or handshake failure. * - * Not all fields are meaningfull for all task types. + * Remark: Currently invoked from a non-interrupt thread (_bh). * - * Returns 0 for SUCCESS, or FAILED. + * Note: With old EH code, at most 1 SCSI TaskMgmt function per IOC + * will be active. * + * Returns 0 for SUCCESS, or %FAILED. **/ int -mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, - int ctx2abort, ulong timeout) +mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout) { - MPT_FRAME_HDR *mf; - SCSITaskMgmt_t *pScsiTm; - int ii; - int retval; - MPT_ADAPTER *ioc = hd->ioc; - unsigned long timeleft; - u8 issue_hard_reset; + MPT_ADAPTER *ioc; + int rc = -1; u32 ioc_raw_state; - unsigned long time_count; + unsigned long flags; + + ioc = hd->ioc; + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler Entered!\n", ioc->name)); + + // SJR - CHECKME - Can we avoid this here? + // (mpt_HardResetHandler has this check...) + spin_lock_irqsave(&ioc->diagLock, flags); + if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)) { + spin_unlock_irqrestore(&ioc->diagLock, flags); + return FAILED; + } + spin_unlock_irqrestore(&ioc->diagLock, flags); + + /* Wait a fixed amount of time for the TM pending flag to be cleared. + * If we time out and not bus reset, then we return a FAILED status + * to the caller. + * The call to mptscsih_tm_pending_wait() will set the pending flag + * if we are + * successful. Otherwise, reload the FW. + */ + if (mptscsih_tm_pending_wait(hd) == FAILED) { + if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler abort: " + "Timed out waiting for last TM (%d) to complete! \n", + ioc->name, hd->tmPending)); + return FAILED; + } else if (type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler target " + "reset: Timed out waiting for last TM (%d) " + "to complete! \n", ioc->name, + hd->tmPending)); + return FAILED; + } else if (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler bus reset: " + "Timed out waiting for last TM (%d) to complete! 
\n", + ioc->name, hd->tmPending)); + return FAILED; + } + } else { + spin_lock_irqsave(&ioc->FreeQlock, flags); + hd->tmPending |= (1 << type); + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + } - issue_hard_reset = 0; ioc_raw_state = mpt_GetIocState(ioc, 0); if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) { printk(MYIOC_s_WARN_FMT - "TaskMgmt type=%x: IOC Not operational (0x%x)!\n", + "TM Handler for type=%x: IOC Not operational (0x%x)!\n", ioc->name, type, ioc_raw_state); - printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n", - ioc->name, __func__); + printk(MYIOC_s_WARN_FMT " Issuing HardReset!!\n", ioc->name); if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0) - printk(MYIOC_s_WARN_FMT "TaskMgmt HardReset " + printk(MYIOC_s_WARN_FMT "TMHandler: HardReset " "FAILED!!\n", ioc->name); - return 0; + return FAILED; } if (ioc_raw_state & MPI_DOORBELL_ACTIVE) { printk(MYIOC_s_WARN_FMT - "TaskMgmt type=%x: ioc_state: " + "TM Handler for type=%x: ioc_state: " "DOORBELL_ACTIVE (0x%x)!\n", ioc->name, type, ioc_raw_state); return FAILED; } - mutex_lock(&ioc->taskmgmt_cmds.mutex); - if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) { - mf = NULL; - retval = FAILED; - goto out; - } + /* Isse the Task Mgmt request. + */ + if (hd->hard_resets < -1) + hd->hard_resets++; + + rc = mptscsih_IssueTaskMgmt(hd, type, channel, id, lun, + ctx2abort, timeout); + if (rc) + printk(MYIOC_s_INFO_FMT "Issue of TaskMgmt failed!\n", + ioc->name); + else + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Issue of TaskMgmt Successful!\n", + ioc->name)); + + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "TMHandler rc = %d!\n", ioc->name, rc)); + + return rc; +} + + +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptscsih_IssueTaskMgmt - Generic send Task Management function. + * @hd: Pointer to MPT_SCSI_HOST structure + * @type: Task Management type + * @channel: channel number for task management + * @id: Logical Target ID for reset (if appropriate) + * @lun: Logical Unit for reset (if appropriate) + * @ctx2abort: Context for the task to be aborted (if appropriate) + * @timeout: timeout for task management control + * + * Remark: _HardResetHandler can be invoked from an interrupt thread (timer) + * or a non-interrupt thread. In the former, must not call schedule(). + * + * Not all fields are meaningfull for all task types. + * + * Returns 0 for SUCCESS, or FAILED. + * + **/ +static int +mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout) +{ + MPT_FRAME_HDR *mf; + SCSITaskMgmt_t *pScsiTm; + int ii; + int retval; + MPT_ADAPTER *ioc = hd->ioc; /* Return Fail to calling function if no message frames available. 
*/ if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "TaskMgmt no msg frames!!\n", ioc->name)); - retval = FAILED; - mpt_clear_taskmgmt_in_progress_flag(ioc); - goto out; + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n", + ioc->name)); + return FAILED; } - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n", + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n", ioc->name, mf)); /* Format the Request @@ -1569,14 +1699,11 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, pScsiTm->TaskMsgContext = ctx2abort; - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt: ctx2abort (0x%08x) " - "task_type = 0x%02X, timeout = %ld\n", ioc->name, ctx2abort, - type, timeout)); + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt: ctx2abort (0x%08x) " + "type=%d\n", ioc->name, ctx2abort, type)); DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)pScsiTm); - INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status) - time_count = jiffies; if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) && (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf); @@ -1584,50 +1711,47 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, retval = mpt_send_handshake_request(ioc->TaskCtx, ioc, sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP); if (retval) { - dfailprintk(ioc, printk(MYIOC_s_ERR_FMT - "TaskMgmt handshake FAILED!(mf=%p, rc=%d) \n", - ioc->name, mf, retval)); - mpt_free_msg_frame(ioc, mf); - mpt_clear_taskmgmt_in_progress_flag(ioc); - goto out; + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "send_handshake FAILED!" + " (hd %p, ioc %p, mf %p, rc=%d) \n", ioc->name, hd, + ioc, mf, retval)); + goto fail_out; } } - timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, - timeout*HZ); - if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) { - retval = FAILED; - dtmprintk(ioc, printk(MYIOC_s_ERR_FMT - "TaskMgmt TIMED OUT!(mf=%p)\n", ioc->name, mf)); - mpt_clear_taskmgmt_in_progress_flag(ioc); - if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) - goto out; - issue_hard_reset = 1; - goto out; + if(mptscsih_tm_wait_for_completion(hd, timeout) == FAILED) { + dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "task management request TIMED OUT!" + " (hd %p, ioc %p, mf %p) \n", ioc->name, hd, + ioc, mf)); + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n", + ioc->name)); + retval = mpt_HardResetHandler(ioc, CAN_SLEEP); + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rc=%d \n", + ioc->name, retval)); + goto fail_out; } - retval = mptscsih_taskmgmt_reply(ioc, type, - (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply); - - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "TaskMgmt completed (%d seconds)\n", - ioc->name, jiffies_to_msecs(jiffies - time_count)/1000)); + /* + * Handle success case, see if theres a non-zero ioc_status. + */ + if (hd->tm_iocstatus == MPI_IOCSTATUS_SUCCESS || + hd->tm_iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED || + hd->tm_iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED) + retval = 0; + else + retval = FAILED; - out: + return retval; - CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status) - if (issue_hard_reset) { - printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n", - ioc->name, __func__); - retval = mpt_HardResetHandler(ioc, CAN_SLEEP); - mpt_free_msg_frame(ioc, mf); - } + fail_out: - retval = (retval == 0) ? 
0 : FAILED; - mutex_unlock(&ioc->taskmgmt_cmds.mutex); - return retval; + /* + * Free task management mf, and corresponding tm flags + */ + mpt_free_msg_frame(ioc, mf); + hd->tmPending = 0; + hd->tmState = TM_STATE_NONE; + return FAILED; } -EXPORT_SYMBOL(mptscsih_IssueTaskMgmt); static int mptscsih_get_tm_timeout(MPT_ADAPTER *ioc) @@ -1714,8 +1838,13 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) goto out; } - if (ioc->timeouts < -1) - ioc->timeouts++; + if (hd->resetPending) { + retval = FAILED; + goto out; + } + + if (hd->timeouts < -1) + hd->timeouts++; if (mpt_fwfault_debug) mpt_halt_firmware(ioc); @@ -1732,30 +1861,22 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) hd->abortSCpnt = SCpnt; - retval = mptscsih_IssueTaskMgmt(hd, - MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, - vdevice->vtarget->channel, - vdevice->vtarget->id, vdevice->lun, - ctx2abort, mptscsih_get_tm_timeout(ioc)); + retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, + vdevice->vtarget->channel, vdevice->vtarget->id, vdevice->lun, + ctx2abort, mptscsih_get_tm_timeout(ioc)); if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx && - SCpnt->serial_number == sn) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "task abort: command still in active list! (sc=%p)\n", - ioc->name, SCpnt)); + SCpnt->serial_number == sn) retval = FAILED; - } else { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "task abort: command cleared from active list! (sc=%p)\n", - ioc->name, SCpnt)); - retval = SUCCESS; - } out: printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n", - ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt); + ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); - return retval; + if (retval == 0) + return SUCCESS; + else + return FAILED; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -1788,9 +1909,14 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt) ioc->name, SCpnt); scsi_print_command(SCpnt); + if (hd->resetPending) { + retval = FAILED; + goto out; + } + vdevice = SCpnt->device->hostdata; if (!vdevice || !vdevice->vtarget) { - retval = SUCCESS; + retval = 0; goto out; } @@ -1801,11 +1927,9 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt) goto out; } - retval = mptscsih_IssueTaskMgmt(hd, - MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, - vdevice->vtarget->channel, - vdevice->vtarget->id, 0, 0, - mptscsih_get_tm_timeout(ioc)); + retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, + vdevice->vtarget->channel, vdevice->vtarget->id, 0, 0, + mptscsih_get_tm_timeout(ioc)); out: printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n", @@ -1848,16 +1972,12 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt) ioc->name, SCpnt); scsi_print_command(SCpnt); - if (ioc->timeouts < -1) - ioc->timeouts++; + if (hd->timeouts < -1) + hd->timeouts++; vdevice = SCpnt->device->hostdata; - if (!vdevice || !vdevice->vtarget) - return SUCCESS; - retval = mptscsih_IssueTaskMgmt(hd, - MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, - vdevice->vtarget->channel, 0, 0, 0, - mptscsih_get_tm_timeout(ioc)); + retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, + vdevice->vtarget->channel, 0, 0, 0, mptscsih_get_tm_timeout(ioc)); printk(MYIOC_s_INFO_FMT "bus reset: %s (sc=%p)\n", ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); @@ -1881,9 +2001,8 @@ int mptscsih_host_reset(struct scsi_cmnd *SCpnt) { MPT_SCSI_HOST * hd; - int status = SUCCESS; + int retval; MPT_ADAPTER *ioc; - int retval; /* If we can't locate the host to reset, then we failed. 
*/ if ((hd = shost_priv(SCpnt->device->host)) == NULL){ @@ -1902,71 +2021,86 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt) /* If our attempts to reset the host failed, then return a failed * status. The host will be taken off line by the SCSI mid-layer. */ - retval = mpt_HardResetHandler(ioc, CAN_SLEEP); - if (retval < 0) - status = FAILED; - else - status = SUCCESS; + if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0) { + retval = FAILED; + } else { + /* Make sure TM pending is cleared and TM state is set to + * NONE. + */ + retval = 0; + hd->tmPending = 0; + hd->tmState = TM_STATE_NONE; + } printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n", ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt); - return status; + return retval; } +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptscsih_tm_pending_wait - wait for pending task management request to complete + * @hd: Pointer to MPT host structure. + * + * Returns {SUCCESS,FAILED}. + */ static int -mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type, - SCSITaskMgmtReply_t *pScsiTmReply) +mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd) { - u16 iocstatus; - u32 termination_count; - int retval; - - if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) { - retval = FAILED; - goto out; - } - - DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply); - - iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK; - termination_count = le32_to_cpu(pScsiTmReply->TerminationCount); - - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "TaskMgmt fw_channel = %d, fw_id = %d, task_type = 0x%02X,\n" - "\tiocstatus = 0x%04X, loginfo = 0x%08X, response_code = 0x%02X,\n" - "\tterm_cmnds = %d\n", ioc->name, pScsiTmReply->Bus, - pScsiTmReply->TargetID, type, le16_to_cpu(pScsiTmReply->IOCStatus), - le32_to_cpu(pScsiTmReply->IOCLogInfo), pScsiTmReply->ResponseCode, - termination_count)); + unsigned long flags; + int loop_count = 4 * 10; /* Wait 10 seconds */ + int status = FAILED; + MPT_ADAPTER *ioc = hd->ioc; - if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 && - pScsiTmReply->ResponseCode) - mptscsih_taskmgmt_response_code(ioc, - pScsiTmReply->ResponseCode); + do { + spin_lock_irqsave(&ioc->FreeQlock, flags); + if (hd->tmState == TM_STATE_NONE) { + hd->tmState = TM_STATE_IN_PROGRESS; + hd->tmPending = 1; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + status = SUCCESS; + break; + } + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + msleep(250); + } while (--loop_count); - if (iocstatus == MPI_IOCSTATUS_SUCCESS) { - retval = 0; - goto out; - } + return status; +} - retval = FAILED; - if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { - if (termination_count == 1) - retval = 0; - goto out; - } +/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +/** + * mptscsih_tm_wait_for_completion - wait for completion of TM task + * @hd: Pointer to MPT host structure. + * @timeout: timeout value + * + * Returns {SUCCESS,FAILED}. 
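Both wait helpers in this area poll a flag every quarter second and give up after four polls per second of timeout. A runnable user-space analogue using nanosleep() in place of msleep():

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

static volatile int done;	/* set by the completion path */

/* Poll 'done' every 250 ms for up to 'timeout' seconds, mirroring
 * mptscsih_tm_wait_for_completion(); 0 on success, -1 on timeout. */
static int wait_for_flag(int timeout)
{
	struct timespec ts = { 0, 250 * 1000 * 1000 };
	int loop_count = 4 * timeout;

	do {
		if (done)
			return 0;
		nanosleep(&ts, NULL);
	} while (--loop_count);
	return -1;
}

int main(void)
{
	done = 1;	/* pretend the reply already arrived */
	printf("wait: %s\n", wait_for_flag(2) == 0 ? "completed" : "timed out");
	return 0;
}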
+ */ +static int +mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout ) +{ + unsigned long flags; + int loop_count = 4 * timeout; + int status = FAILED; + MPT_ADAPTER *ioc = hd->ioc; - if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED || - iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED) - retval = 0; + do { + spin_lock_irqsave(&ioc->FreeQlock, flags); + if(hd->tmPending == 0) { + status = SUCCESS; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + break; + } + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + msleep(250); + } while (--loop_count); - out: - return retval; + return status; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ -void +static void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code) { char *desc; @@ -2000,7 +2134,6 @@ mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code) printk(MYIOC_s_INFO_FMT "Response Code(0x%08x): F/W: %s\n", ioc->name, response_code, desc); } -EXPORT_SYMBOL(mptscsih_taskmgmt_response_code); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ /** @@ -2017,28 +2150,97 @@ EXPORT_SYMBOL(mptscsih_taskmgmt_response_code); * Returns 1 indicating alloc'd request frame ptr should be freed. **/ int -mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, - MPT_FRAME_HDR *mr) +mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) { - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "TaskMgmt completed (mf=%p, mr=%p)\n", ioc->name, mf, mr)); + SCSITaskMgmtReply_t *pScsiTmReply; + SCSITaskMgmt_t *pScsiTmReq; + MPT_SCSI_HOST *hd; + unsigned long flags; + u16 iocstatus; + u8 tmType; + u32 termination_count; - ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed (mf=%p,mr=%p)\n", + ioc->name, mf, mr)); + if (!ioc->sh) { + dtmprintk(ioc, printk(MYIOC_s_WARN_FMT + "TaskMgmt Complete: NULL Scsi Host Ptr\n", ioc->name)); + return 1; + } + + if (mr == NULL) { + dtmprintk(ioc, printk(MYIOC_s_WARN_FMT + "ERROR! 
TaskMgmt Reply: NULL Request %p\n", ioc->name, mf)); + return 1; + } - if (!mr) + hd = shost_priv(ioc->sh); + pScsiTmReply = (SCSITaskMgmtReply_t*)mr; + pScsiTmReq = (SCSITaskMgmt_t*)mf; + tmType = pScsiTmReq->TaskType; + iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK; + termination_count = le32_to_cpu(pScsiTmReply->TerminationCount); + + if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 && + pScsiTmReply->ResponseCode) + mptscsih_taskmgmt_response_code(ioc, + pScsiTmReply->ResponseCode); + DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply); + +#ifdef CONFIG_FUSION_LOGGING + if ((ioc->debug_level & MPT_DEBUG_REPLY) || + (ioc->debug_level & MPT_DEBUG_TM )) + printk("%s: ha=%d [%d:%d:0] task_type=0x%02X " + "iocstatus=0x%04X\n\tloginfo=0x%08X response_code=0x%02X " + "term_cmnds=%d\n", __func__, ioc->id, pScsiTmReply->Bus, + pScsiTmReply->TargetID, pScsiTmReq->TaskType, + le16_to_cpu(pScsiTmReply->IOCStatus), + le32_to_cpu(pScsiTmReply->IOCLogInfo),pScsiTmReply->ResponseCode, + le32_to_cpu(pScsiTmReply->TerminationCount)); +#endif + if (!iocstatus) { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT " TaskMgmt SUCCESS\n", ioc->name)); + hd->abortSCpnt = NULL; goto out; + } - ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID; - memcpy(ioc->taskmgmt_cmds.reply, mr, - min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength)); - out: - if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) { - mpt_clear_taskmgmt_in_progress_flag(ioc); - ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING; - complete(&ioc->taskmgmt_cmds.done); - return 1; + /* Error? (anything non-zero?) */ + + /* clear flags and continue. + */ + switch (tmType) { + + case MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK: + if (termination_count == 1) + iocstatus = MPI_IOCSTATUS_SCSI_TASK_TERMINATED; + hd->abortSCpnt = NULL; + break; + + case MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS: + + /* If an internal command is present + * or the TM failed - reload the FW. 
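The completion routine normalizes the reply per task type: for an abort, a termination count of 1 is upgraded to a terminated status before this decision, while a failed bus reset, or one racing an internal command, forces a firmware reload. A pure-function condensation of that switch; the enum values are abbreviations, not the MPI numbers:

#include <stdio.h>

enum tm_type   { TM_ABORT = 1, TM_BUS_RESET = 4 };	/* abbreviated codes */
enum tm_action { TM_DONE, TM_HARD_RESET };

/* Condensed shape of the tmType switch in mptscsih_taskmgmt_complete();
 * 'failed' stands for an MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED reply. */
static enum tm_action classify(enum tm_type type, int failed,
			       int internal_cmd_pending)
{
	switch (type) {
	case TM_BUS_RESET:	/* a failed bus reset means reload the firmware */
		return (failed || internal_cmd_pending) ? TM_HARD_RESET : TM_DONE;
	case TM_ABORT:		/* termination_count == 1 was already upgraded */
	default:
		return TM_DONE;
	}
}

int main(void)
{
	printf("bus reset, failed -> %s\n",
	       classify(TM_BUS_RESET, 1, 0) == TM_HARD_RESET ? "hard reset" : "done");
	printf("abort, failed     -> %s\n",
	       classify(TM_ABORT, 1, 0) == TM_HARD_RESET ? "hard reset" : "done");
	return 0;
}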
+ * FC FW may respond FAILED to an ABORT + */ + if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED || + hd->cmdPtr) + if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0) + printk(MYIOC_s_WARN_FMT " Firmware Reload FAILED!!\n", ioc->name); + break; + + case MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + default: + break; } - return 0; + + out: + spin_lock_irqsave(&ioc->FreeQlock, flags); + hd->tmPending = 0; + hd->tmState = TM_STATE_NONE; + hd->tm_iocstatus = iocstatus; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + + return 1; } /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ @@ -2088,10 +2290,8 @@ int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id) { struct inactive_raid_component_info *component_info; - int i, j; - RaidPhysDiskPage1_t *phys_disk; + int i; int rc = 0; - int num_paths; if (!ioc->raid_data.pIocPg3) goto out; @@ -2103,45 +2303,6 @@ mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id) } } - if (ioc->bus_type != SAS) - goto out; - - /* - * Check if dual path - */ - for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) { - num_paths = mpt_raid_phys_disk_get_num_paths(ioc, - ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum); - if (num_paths < 2) - continue; - phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) + - (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL); - if (!phys_disk) - continue; - if ((mpt_raid_phys_disk_pg1(ioc, - ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum, - phys_disk))) { - kfree(phys_disk); - continue; - } - for (j = 0; j < num_paths; j++) { - if ((phys_disk->Path[j].Flags & - MPI_RAID_PHYSDISK1_FLAG_INVALID)) - continue; - if ((phys_disk->Path[j].Flags & - MPI_RAID_PHYSDISK1_FLAG_BROKEN)) - continue; - if ((id == phys_disk->Path[j].PhysDiskID) && - (channel == phys_disk->Path[j].PhysDiskBus)) { - rc = 1; - kfree(phys_disk); - goto out; - } - } - kfree(phys_disk); - } - - /* * Check inactive list for matching phys disks */ @@ -2166,10 +2327,8 @@ u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id) { struct inactive_raid_component_info *component_info; - int i, j; - RaidPhysDiskPage1_t *phys_disk; + int i; int rc = -ENXIO; - int num_paths; if (!ioc->raid_data.pIocPg3) goto out; @@ -2181,44 +2340,6 @@ mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id) } } - if (ioc->bus_type != SAS) - goto out; - - /* - * Check if dual path - */ - for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) { - num_paths = mpt_raid_phys_disk_get_num_paths(ioc, - ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum); - if (num_paths < 2) - continue; - phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) + - (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL); - if (!phys_disk) - continue; - if ((mpt_raid_phys_disk_pg1(ioc, - ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum, - phys_disk))) { - kfree(phys_disk); - continue; - } - for (j = 0; j < num_paths; j++) { - if ((phys_disk->Path[j].Flags & - MPI_RAID_PHYSDISK1_FLAG_INVALID)) - continue; - if ((phys_disk->Path[j].Flags & - MPI_RAID_PHYSDISK1_FLAG_BROKEN)) - continue; - if ((id == phys_disk->Path[j].PhysDiskID) && - (channel == phys_disk->Path[j].PhysDiskBus)) { - rc = phys_disk->PhysDiskNum; - kfree(phys_disk); - goto out; - } - } - kfree(phys_disk); - } - /* * Check inactive list for matching phys disks */ @@ -2336,6 +2457,7 @@ mptscsih_slave_configure(struct scsi_device *sdev) sdev->ppr, sdev->inquiry_len)); vdevice->configured_lun = 1; + mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH); dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT 
"Queue depth=%d, tflags=%x\n", @@ -2347,7 +2469,6 @@ mptscsih_slave_configure(struct scsi_device *sdev) ioc->name, vtarget->negoFlags, vtarget->maxOffset, vtarget->minSyncFactor)); - mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH); dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "tagged %d, simple %d, ordered %d\n", ioc->name,sdev->tagged_supported, sdev->simple_tags, @@ -2421,13 +2542,15 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR } /** - * mptscsih_get_scsi_lookup - retrieves scmd entry + * mptscsih_get_scsi_lookup * @ioc: Pointer to MPT_ADAPTER structure * @i: index into the array * + * retrieves scmd entry from ScsiLookup[] array list + * * Returns the scsi_cmd pointer - */ -struct scsi_cmnd * + **/ +static struct scsi_cmnd * mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) { unsigned long flags; @@ -2439,15 +2562,15 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i) return scmd; } -EXPORT_SYMBOL(mptscsih_get_scsi_lookup); /** - * mptscsih_getclear_scsi_lookup - retrieves and clears scmd entry from ScsiLookup[] array list + * mptscsih_getclear_scsi_lookup * @ioc: Pointer to MPT_ADAPTER structure * @i: index into the array * - * Returns the scsi_cmd pointer + * retrieves and clears scmd entry from ScsiLookup[] array list * + * Returns the scsi_cmd pointer **/ static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i) @@ -2512,33 +2635,94 @@ int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) { MPT_SCSI_HOST *hd; + unsigned long flags; + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT + ": IOC %s_reset routed to SCSI host driver!\n", + ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : ( + reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post"))); + + /* If a FW reload request arrives after base installed but + * before all scsi hosts have been attached, then an alt_ioc + * may have a NULL sh pointer. + */ if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL) return 0; + else + hd = shost_priv(ioc->sh); - hd = shost_priv(ioc->sh); - switch (reset_phase) { - case MPT_IOC_SETUP_RESET: - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__)); - break; - case MPT_IOC_PRE_RESET: - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__)); + if (reset_phase == MPT_IOC_SETUP_RESET) { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Setup-Diag Reset\n", ioc->name)); + + /* Clean Up: + * 1. Set Hard Reset Pending Flag + * All new commands go to doneQ + */ + hd->resetPending = 1; + + } else if (reset_phase == MPT_IOC_PRE_RESET) { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Pre-Diag Reset\n", ioc->name)); + + /* 2. Flush running commands + * Clean ScsiLookup (and associated memory) + * AND clean mytaskQ + */ + + /* 2b. Reply to OS all known outstanding I/O commands. + */ mptscsih_flush_running_cmds(hd); - break; - case MPT_IOC_POST_RESET: - dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__)); - if (ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING) { - ioc->internal_cmds.status |= - MPT_MGMT_STATUS_DID_IOCRESET; - complete(&ioc->internal_cmds.done); + + /* 2c. If there was an internal command that + * has not completed, configuration or io request, + * free these resources. 
+ */ + if (hd->cmdPtr) { + del_timer(&hd->timer); + mpt_free_msg_frame(ioc, hd->cmdPtr); } - break; - default: - break; + + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Pre-Reset complete.\n", ioc->name)); + + } else { + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Post-Diag Reset\n", ioc->name)); + + /* Once a FW reload begins, all new OS commands are + * redirected to the doneQ w/ a reset status. + * Init all control structures. + */ + + /* 2. Chain Buffer initialization + */ + + /* 4. Renegotiate to all devices, if SPI + */ + + /* 5. Enable new commands to be posted + */ + spin_lock_irqsave(&ioc->FreeQlock, flags); + hd->tmPending = 0; + spin_unlock_irqrestore(&ioc->FreeQlock, flags); + hd->resetPending = 0; + hd->tmState = TM_STATE_NONE; + + /* 6. If there was an internal command, + * wake this process up. + */ + if (hd->cmdPtr) { + /* + * Wake up the original calling thread + */ + hd->pLocal = &hd->localReply; + hd->pLocal->completion = MPT_SCANDV_DID_RESET; + hd->scandv_wait_done = 1; + wake_up(&hd->scandv_waitq); + hd->cmdPtr = NULL; + } + + dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Post-Reset complete.\n", ioc->name)); + } + return 1; /* currently means nothing really */ } @@ -2546,16 +2730,55 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase) int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) { + MPT_SCSI_HOST *hd; u8 event = le32_to_cpu(pEvReply->Event) & 0xFF; - devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT - "MPT event (=%02Xh) routed to SCSI host driver!\n", - ioc->name, event)); + devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n", + ioc->name, event)); - if ((event == MPI_EVENT_IOC_BUS_RESET || - event == MPI_EVENT_EXT_BUS_RESET) && - (ioc->bus_type == SPI) && (ioc->soft_resets < -1)) - ioc->soft_resets++; + if (ioc->sh == NULL || + ((hd = shost_priv(ioc->sh)) == NULL)) + return 1; + + switch (event) { + case MPI_EVENT_UNIT_ATTENTION: /* 03 */ + /* FIXME! */ + break; + case MPI_EVENT_IOC_BUS_RESET: /* 04 */ + case MPI_EVENT_EXT_BUS_RESET: /* 05 */ + if (hd && (ioc->bus_type == SPI) && (hd->soft_resets < -1)) + hd->soft_resets++; + break; + case MPI_EVENT_LOGOUT: /* 09 */ + /* FIXME! */ + break; + + case MPI_EVENT_RESCAN: /* 06 */ + break; + + /* + * CHECKME! Don't think we need to do + * anything for these, but... + */ + case MPI_EVENT_LINK_STATUS_CHANGE: /* 07 */ + case MPI_EVENT_LOOP_STATE_CHANGE: /* 08 */ + /* + * CHECKME! Falling thru... + */ + break; + + case MPI_EVENT_INTEGRATED_RAID: /* 0B */ + break; + + case MPI_EVENT_NONE: /* 00 */ + case MPI_EVENT_LOG_DATA: /* 01 */ + case MPI_EVENT_STATE_CHANGE: /* 02 */ + case MPI_EVENT_EVENT_CHANGE: /* 0A */ + default: + dprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": Ignoring event (=%02Xh)\n", + ioc->name, event)); + break; + } return 1; /* currently means nothing really */ } @@ -2586,44 +2809,153 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) * Used ONLY for DV and other internal commands. 
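The internal-command path revived below is a sleeping rendezvous: the submitter clears scandv_wait_done, posts the frame, and blocks in wait_event(); the completion callback fills hd->pLocal, sets the flag, and calls wake_up(). A pthread analogue of that handshake, all names hypothetical:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int wait_done;		/* plays hd->scandv_wait_done */
static int completion_code;	/* plays hd->pLocal->completion */

/* The "interrupt" side: fill in the result, then flag and wake the waiter,
 * as mptscsih_scandv_complete() does with wake_up(). */
static void *complete_cmd(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	completion_code = 0;	/* e.g. MPT_SCANDV_GOOD */
	wait_done = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, complete_cmd, NULL);

	/* Submitter side: wait_event(hd->scandv_waitq, hd->scandv_wait_done) */
	pthread_mutex_lock(&lock);
	while (!wait_done)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("internal command finished, completion=%d\n", completion_code);
	pthread_join(t, NULL);
	return 0;
}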
*/ int -mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, - MPT_FRAME_HDR *reply) +mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) { + MPT_SCSI_HOST *hd; SCSIIORequest_t *pReq; - SCSIIOReply_t *pReply; - u8 cmd; + int completionCode; u16 req_idx; - u8 *sense_data; - int sz; - ioc->internal_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; - ioc->internal_cmds.completion_code = MPT_SCANDV_GOOD; - if (!reply) - goto out; + hd = shost_priv(ioc->sh); - pReply = (SCSIIOReply_t *) reply; - pReq = (SCSIIORequest_t *) req; - ioc->internal_cmds.completion_code = - mptscsih_get_completion_code(ioc, req, reply); - ioc->internal_cmds.status |= MPT_MGMT_STATUS_RF_VALID; - memcpy(ioc->internal_cmds.reply, reply, - min(MPT_DEFAULT_FRAME_SIZE, 4 * reply->u.reply.MsgLength)); - cmd = reply->u.hdr.Function; - if (((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) || - (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) && - (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)) { - req_idx = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx); - sense_data = ((u8 *)ioc->sense_buf_pool + - (req_idx * MPT_SENSE_BUFFER_ALLOC)); - sz = min_t(int, pReq->SenseBufferLength, - MPT_SENSE_BUFFER_ALLOC); - memcpy(ioc->internal_cmds.sense, sense_data, sz); + if ((mf == NULL) || + (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) { + printk(MYIOC_s_ERR_FMT + "ScanDvComplete, %s req frame ptr! (=%p)\n", + ioc->name, mf?"BAD":"NULL", (void *) mf); + goto wakeup; } - out: - if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING)) - return 0; - ioc->internal_cmds.status &= ~MPT_MGMT_STATUS_PENDING; - complete(&ioc->internal_cmds.done); + + del_timer(&hd->timer); + req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx); + mptscsih_set_scsi_lookup(ioc, req_idx, NULL); + pReq = (SCSIIORequest_t *) mf; + + if (mf != hd->cmdPtr) { + printk(MYIOC_s_WARN_FMT "ScanDvComplete (mf=%p, cmdPtr=%p, idx=%d)\n", + ioc->name, (void *)mf, (void *) hd->cmdPtr, req_idx); + } + hd->cmdPtr = NULL; + + ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScanDvComplete (mf=%p,mr=%p,idx=%d)\n", + ioc->name, mf, mr, req_idx)); + + hd->pLocal = &hd->localReply; + hd->pLocal->scsiStatus = 0; + + /* If target struct exists, clear sense valid flag. 
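The status switch that follows collapses the MPI IOCStatus space into the driver's MPT_SCANDV_* completion codes. A pure-function distillation using the numeric values quoted in the hunk's own comments; the sense-data refinements of the success case are left out:

#include <stdio.h>

/* IOCStatus values exactly as annotated in the driver's comments. */
#define IOCSTATUS_SUCCESS          0x0000
#define IOCSTATUS_DEV_NOT_THERE    0x0043
#define IOCSTATUS_DATA_UNDERRUN    0x0045
#define IOCSTATUS_IO_DATA_ERROR    0x0046
#define IOCSTATUS_PROTOCOL_ERROR   0x0047
#define IOCSTATUS_TASK_TERMINATED  0x0048
#define IOCSTATUS_IOC_TERMINATED   0x004B
#define IOCSTATUS_EXT_TERMINATED   0x004C

enum scandv { SCANDV_GOOD, SCANDV_SELECTION_TIMEOUT,
	      SCANDV_DID_RESET, SCANDV_SOME_ERROR };

/* Condensed form of the status switch in mptscsih_scandv_complete(). */
static enum scandv classify_iocstatus(unsigned int status)
{
	switch (status) {
	case IOCSTATUS_DEV_NOT_THERE:
		return SCANDV_SELECTION_TIMEOUT;
	case IOCSTATUS_IO_DATA_ERROR:
	case IOCSTATUS_TASK_TERMINATED:
	case IOCSTATUS_IOC_TERMINATED:
	case IOCSTATUS_EXT_TERMINATED:
		return SCANDV_DID_RESET;
	case IOCSTATUS_SUCCESS:
	case IOCSTATUS_DATA_UNDERRUN:
		return SCANDV_GOOD;	/* sense/autosense refinements omitted */
	case IOCSTATUS_PROTOCOL_ERROR:	/* DID_RESET only if I/O terminated */
	default:
		return SCANDV_SOME_ERROR;
	}
}

int main(void)
{
	printf("0x0043 -> %d\n", classify_iocstatus(IOCSTATUS_DEV_NOT_THERE));
	printf("0x0048 -> %d\n", classify_iocstatus(IOCSTATUS_TASK_TERMINATED));
	return 0;
}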
+ */ + if (mr == NULL) { + completionCode = MPT_SCANDV_GOOD; + } else { + SCSIIOReply_t *pReply; + u16 status; + u8 scsi_status; + + pReply = (SCSIIOReply_t *) mr; + + status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK; + scsi_status = pReply->SCSIStatus; + + + switch(status) { + + case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */ + completionCode = MPT_SCANDV_SELECTION_TIMEOUT; + break; + + case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */ + case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */ + case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */ + case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */ + completionCode = MPT_SCANDV_DID_RESET; + break; + + case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */ + case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */ + case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */ + if (pReply->Function == MPI_FUNCTION_CONFIG) { + ConfigReply_t *pr = (ConfigReply_t *)mr; + completionCode = MPT_SCANDV_GOOD; + hd->pLocal->header.PageVersion = pr->Header.PageVersion; + hd->pLocal->header.PageLength = pr->Header.PageLength; + hd->pLocal->header.PageNumber = pr->Header.PageNumber; + hd->pLocal->header.PageType = pr->Header.PageType; + + } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) { + /* If the RAID Volume request is successful, + * return GOOD, else indicate that + * some type of error occurred. + */ + MpiRaidActionReply_t *pr = (MpiRaidActionReply_t *)mr; + if (le16_to_cpu(pr->ActionStatus) == MPI_RAID_ACTION_ASTATUS_SUCCESS) + completionCode = MPT_SCANDV_GOOD; + else + completionCode = MPT_SCANDV_SOME_ERROR; + memcpy(hd->pLocal->sense, pr, sizeof(hd->pLocal->sense)); + + } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) { + u8 *sense_data; + int sz; + + /* save sense data in global structure + */ + completionCode = MPT_SCANDV_SENSE; + hd->pLocal->scsiStatus = scsi_status; + sense_data = ((u8 *)ioc->sense_buf_pool + + (req_idx * MPT_SENSE_BUFFER_ALLOC)); + + sz = min_t(int, pReq->SenseBufferLength, + SCSI_STD_SENSE_BYTES); + memcpy(hd->pLocal->sense, sense_data, sz); + + ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT " Check Condition, sense ptr %p\n", + ioc->name, sense_data)); + } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) { + if (pReq->CDB[0] == INQUIRY) + completionCode = MPT_SCANDV_ISSUE_SENSE; + else + completionCode = MPT_SCANDV_DID_RESET; + } + else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS) + completionCode = MPT_SCANDV_DID_RESET; + else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED) + completionCode = MPT_SCANDV_DID_RESET; + else { + completionCode = MPT_SCANDV_GOOD; + hd->pLocal->scsiStatus = scsi_status; + } + break; + + case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */ + if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED) + completionCode = MPT_SCANDV_DID_RESET; + else + completionCode = MPT_SCANDV_SOME_ERROR; + break; + + default: + completionCode = MPT_SCANDV_SOME_ERROR; + break; + + } /* switch(status) */ + + } /* end of address reply case */ + + hd->pLocal->completion = completionCode; + + /* MF and RF are freed in mpt_interrupt + */ +wakeup: + /* Free Chain buffers (will never chain) in scan or dv */ + //mptscsih_freeChainBuffers(ioc, req_idx); + + /* + * Wake up the original calling thread + */ + hd->scandv_wait_done = 1; + wake_up(&hd->scandv_waitq); + return 1; } @@ -2672,95 +3004,6 @@ mptscsih_timer_expired(unsigned long data) return; } -/** - * mptscsih_get_completion_code - - * @ioc: Pointer to MPT_ADAPTER structure - * @reply: - * @cmd: - * - **/ -static int 
-mptscsih_get_completion_code(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
-				MPT_FRAME_HDR *reply)
-{
-	SCSIIOReply_t	*pReply;
-	MpiRaidActionReply_t *pr;
-	u8		 scsi_status;
-	u16		 status;
-	int		 completion_code;
-
-	pReply = (SCSIIOReply_t *)reply;
-	status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
-	scsi_status = pReply->SCSIStatus;
-
-	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-	    "IOCStatus=%04xh, SCSIState=%02xh, SCSIStatus=%02xh,"
-	    "IOCLogInfo=%08xh\n", ioc->name, status, pReply->SCSIState,
-	    scsi_status, le32_to_cpu(pReply->IOCLogInfo)));
-
-	switch (status) {
-
-	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:	/* 0x0043 */
-		completion_code = MPT_SCANDV_SELECTION_TIMEOUT;
-		break;
-
-	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:		/* 0x0046 */
-	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:	/* 0x0048 */
-	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:		/* 0x004B */
-	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:		/* 0x004C */
-		completion_code = MPT_SCANDV_DID_RESET;
-		break;
-
-	case MPI_IOCSTATUS_BUSY:
-	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
-		completion_code = MPT_SCANDV_BUSY;
-		break;
-
-	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:		/* 0x0045 */
-	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:	/* 0x0040 */
-	case MPI_IOCSTATUS_SUCCESS:			/* 0x0000 */
-		if (pReply->Function == MPI_FUNCTION_CONFIG) {
-			completion_code = MPT_SCANDV_GOOD;
-		} else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
-			pr = (MpiRaidActionReply_t *)reply;
-			if (le16_to_cpu(pr->ActionStatus) ==
-			    MPI_RAID_ACTION_ASTATUS_SUCCESS)
-				completion_code = MPT_SCANDV_GOOD;
-			else
-				completion_code = MPT_SCANDV_SOME_ERROR;
-		} else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)
-			completion_code = MPT_SCANDV_SENSE;
-		else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
-			if (req->u.scsireq.CDB[0] == INQUIRY)
-				completion_code = MPT_SCANDV_ISSUE_SENSE;
-			else
-				completion_code = MPT_SCANDV_DID_RESET;
-		} else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
-			completion_code = MPT_SCANDV_DID_RESET;
-		else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
-			completion_code = MPT_SCANDV_DID_RESET;
-		else if (scsi_status == MPI_SCSI_STATUS_BUSY)
-			completion_code = MPT_SCANDV_BUSY;
-		else
-			completion_code = MPT_SCANDV_GOOD;
-		break;
-
-	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:		/* 0x0047 */
-		if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
-			completion_code = MPT_SCANDV_DID_RESET;
-		else
-			completion_code = MPT_SCANDV_SOME_ERROR;
-		break;
-	default:
-		completion_code = MPT_SCANDV_SOME_ERROR;
-		break;
-
-	}	/* switch(status) */
-
-	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-	    " completionCode set to %08xh\n", ioc->name, completion_code));
-	return completion_code;
-}
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
@@ -2787,27 +3030,22 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 {
 	MPT_FRAME_HDR	*mf;
 	SCSIIORequest_t	*pScsiReq;
+	SCSIIORequest_t	 ReqCopy;
 	int		 my_idx, ii, dir;
-	int		 timeout;
+	int		 rc, cmdTimeout;
+	int		 in_isr;
 	char		 cmdLen;
 	char		 CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
-	u8		 cmd = io->cmd;
-	MPT_ADAPTER *ioc = hd->ioc;
-	int		 ret = 0;
-	unsigned long	 timeleft;
-	unsigned long	 flags;
+	char		 cmd = io->cmd;
+	MPT_ADAPTER *ioc = hd->ioc;
 
-	/* don't send internal command during diag reset */
-	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
-	if (ioc->ioc_reset_in_progress) {
-		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
-		dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-		    "%s: busy with host reset\n", ioc->name, __func__));
-		return MPT_SCANDV_BUSY;
+	in_isr = in_interrupt();
+	if (in_isr) {
+		dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Internal SCSI IO request not allowed in ISR context!\n",
+				ioc->name));
+		return -EPERM;
 	}
-	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
-
-	mutex_lock(&ioc->internal_cmds.mutex);
 
 	/* Set command specific information */
@@ -2817,13 +3055,13 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 		dir = MPI_SCSIIO_CONTROL_READ;
 		CDB[0] = cmd;
 		CDB[4] = io->size;
-		timeout = 10;
+		cmdTimeout = 10;
 		break;
 
 	case TEST_UNIT_READY:
 		cmdLen = 6;
 		dir = MPI_SCSIIO_CONTROL_READ;
-		timeout = 10;
+		cmdTimeout = 10;
 		break;
 
 	case START_STOP:
@@ -2831,7 +3069,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 		dir = MPI_SCSIIO_CONTROL_READ;
 		CDB[0] = cmd;
 		CDB[4] = 1;	/*Spin up the disk */
-		timeout = 15;
+		cmdTimeout = 15;
 		break;
 
 	case REQUEST_SENSE:
@@ -2839,7 +3077,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 		CDB[0] = cmd;
 		CDB[4] = io->size;
 		dir = MPI_SCSIIO_CONTROL_READ;
-		timeout = 10;
+		cmdTimeout = 10;
 		break;
 
 	case READ_BUFFER:
@@ -2858,7 +3096,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 		CDB[6] = (io->size >> 16) & 0xFF;
 		CDB[7] = (io->size >> 8) & 0xFF;
 		CDB[8] = io->size & 0xFF;
-		timeout = 10;
+		cmdTimeout = 10;
 		break;
 
 	case WRITE_BUFFER:
@@ -2873,21 +3111,21 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 		CDB[6] = (io->size >> 16) & 0xFF;
 		CDB[7] = (io->size >> 8) & 0xFF;
 		CDB[8] = io->size & 0xFF;
-		timeout = 10;
+		cmdTimeout = 10;
 		break;
 
 	case RESERVE:
 		cmdLen = 6;
 		dir = MPI_SCSIIO_CONTROL_READ;
 		CDB[0] = cmd;
-		timeout = 10;
+		cmdTimeout = 10;
 		break;
 
 	case RELEASE:
 		cmdLen = 6;
 		dir = MPI_SCSIIO_CONTROL_READ;
 		CDB[0] = cmd;
-		timeout = 10;
+		cmdTimeout = 10;
 		break;
 
 	case SYNCHRONIZE_CACHE:
@@ -2895,23 +3133,20 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 		dir = MPI_SCSIIO_CONTROL_READ;
 		CDB[0] = cmd;
 //		CDB[1] = 0x02;	/* set immediate bit */
-		timeout = 10;
+		cmdTimeout = 10;
 		break;
 
 	default:
 		/* Error Case */
-		ret = -EFAULT;
-		goto out;
+		return -EFAULT;
 	}
 
 	/* Get and Populate a free Frame
-	 * MsgContext set in mpt_get_msg_frame call
 	 */
 	if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
-		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: No msg frames!\n",
-		    ioc->name, __func__));
-		ret = MPT_SCANDV_BUSY;
-		goto out;
+		dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "No msg frames!\n",
+				ioc->name));
+		return -EBUSY;
 	}
 
 	pScsiReq = (SCSIIORequest_t *) mf;
@@ -2937,7 +3172,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 
 	pScsiReq->Reserved = 0;
 
-	pScsiReq->MsgFlags = mpt_msg_flags(ioc);
+	pScsiReq->MsgFlags = mpt_msg_flags();
 	/* MsgContext set in mpt_get_msg_fram call */
 
 	int_to_scsilun(io->lun, (struct scsi_lun *)pScsiReq->LUN);
@@ -2949,58 +3184,74 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 
 	if (cmd == REQUEST_SENSE) {
 		pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
-		devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-		    "%s: Untagged! 0x%02x\n", ioc->name, __func__, cmd));
+		ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Untagged! 0x%2x\n",
+			ioc->name, cmd));
 	}
 
-	for (ii = 0; ii < 16; ii++)
+	for (ii=0; ii < 16; ii++)
 		pScsiReq->CDB[ii] = CDB[ii];
 
 	pScsiReq->DataLength = cpu_to_le32(io->size);
 	pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma
 					   + (my_idx * MPT_SENSE_BUFFER_ALLOC));
 
-	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-	    "%s: Sending Command 0x%02x for fw_channel=%d fw_id=%d lun=%d\n",
-	    ioc->name, __func__, cmd, io->channel, io->id, io->lun));
+	ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Command 0x%x for (%d:%d:%d)\n",
+			ioc->name, cmd, io->channel, io->id, io->lun));
 
-	if (dir == MPI_SCSIIO_CONTROL_READ)
-		ioc->add_sge((char *) &pScsiReq->SGL,
-		    MPT_SGE_FLAGS_SSIMPLE_READ | io->size, io->data_dma);
-	else
-		ioc->add_sge((char *) &pScsiReq->SGL,
-		    MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size, io->data_dma);
+	if (dir == MPI_SCSIIO_CONTROL_READ) {
+		mpt_add_sge((char *) &pScsiReq->SGL,
+			MPT_SGE_FLAGS_SSIMPLE_READ | io->size,
+			io->data_dma);
+	} else {
+		mpt_add_sge((char *) &pScsiReq->SGL,
+			MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size,
+			io->data_dma);
+	}
 
-	INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
+	/* The ISR will free the request frame, but we need
+	 * the information to initialize the target. Duplicate.
+	 */
+	memcpy(&ReqCopy, pScsiReq, sizeof(SCSIIORequest_t));
+
+	/* Issue this command after:
+	 *	finish init
+	 *	add timer
+	 * Wait until the reply has been received
+	 *  ScsiScanDvCtx callback function will
+	 *	set hd->pLocal;
+	 *	set scandv_wait_done and call wake_up
+	 */
+	hd->pLocal = NULL;
+	hd->timer.expires = jiffies + HZ*cmdTimeout;
+	hd->scandv_wait_done = 0;
+
+	/* Save cmd pointer, for resource free if timeout or
+	 * FW reload occurs
+	 */
+	hd->cmdPtr = mf;
+
+	add_timer(&hd->timer);
 	mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
-	timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done,
-	    timeout*HZ);
-	if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
-		ret = MPT_SCANDV_DID_RESET;
-		dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-		    "%s: TIMED OUT for cmd=0x%02x\n", ioc->name, __func__,
-		    cmd));
-		if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
-			mpt_free_msg_frame(ioc, mf);
-			goto out;
-		}
-		if (!timeleft) {
-			printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
-			    ioc->name, __func__);
-			mpt_HardResetHandler(ioc, CAN_SLEEP);
-			mpt_free_msg_frame(ioc, mf);
-		}
-		goto out;
-	}
+	wait_event(hd->scandv_waitq, hd->scandv_wait_done);
 
-	ret = ioc->internal_cmds.completion_code;
-	devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: success, rc=0x%02x\n",
-	    ioc->name, __func__, ret));
+	if (hd->pLocal) {
+		rc = hd->pLocal->completion;
+		hd->pLocal->skip = 0;
 
- out:
-	CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
-	mutex_unlock(&ioc->internal_cmds.mutex);
-	return ret;
+		/* Always set fatal error codes in some cases.
+		 */
+		if (rc == MPT_SCANDV_SELECTION_TIMEOUT)
+			rc = -ENXIO;
+		else if (rc == MPT_SCANDV_SOME_ERROR)
+			rc = -rc;
+	} else {
+		rc = -EFAULT;
+		/* This should never happen.
+		 */
+		ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "_do_cmd: Null pLocal!!!\n",
+				ioc->name));
+	}
+
+	return rc;
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -3240,7 +3491,6 @@ struct device_attribute *mptscsih_host_attrs[] = {
 	&dev_attr_debug_level,
 	NULL,
 };
-
 EXPORT_SYMBOL(mptscsih_host_attrs);
 
 EXPORT_SYMBOL(mptscsih_remove);
@@ -3266,5 +3516,6 @@ EXPORT_SYMBOL(mptscsih_event_process);
 EXPORT_SYMBOL(mptscsih_ioc_reset);
 EXPORT_SYMBOL(mptscsih_change_queue_depth);
 EXPORT_SYMBOL(mptscsih_timer_expired);
+EXPORT_SYMBOL(mptscsih_TMHandler);
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/trunk/drivers/message/fusion/mptscsih.h b/trunk/drivers/message/fusion/mptscsih.h
index eb3f677528ac..319aa3033371 100644
--- a/trunk/drivers/message/fusion/mptscsih.h
+++ b/trunk/drivers/message/fusion/mptscsih.h
@@ -60,7 +60,6 @@
 #define MPT_SCANDV_SELECTION_TIMEOUT	(0x00000008)
 #define MPT_SCANDV_ISSUE_SENSE		(0x00000010)
 #define MPT_SCANDV_FALLBACK		(0x00000020)
-#define MPT_SCANDV_BUSY			(0x00000040)
 
 #define MPT_SCANDV_MAX_RETRIES		(10)
 
@@ -90,7 +89,6 @@
 #endif
 
-
 typedef struct _internal_cmd {
 	char		*data;		/* data pointer */
 	dma_addr_t	data_dma;	/* data dma address */
@@ -114,8 +112,6 @@ extern int mptscsih_resume(struct pci_dev *pdev);
 extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func);
 extern const char * mptscsih_info(struct Scsi_Host *SChost);
 extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
-extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
-	u8 id, int lun, int ctx2abort, ulong timeout);
 extern void mptscsih_slave_destroy(struct scsi_device *device);
 extern int mptscsih_slave_configure(struct scsi_device *device);
 extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
@@ -130,8 +126,7 @@ extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pE
 extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
 extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
 extern void mptscsih_timer_expired(unsigned long data);
+extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
 extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern struct device_attribute *mptscsih_host_attrs[];
-extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
-extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
diff --git a/trunk/drivers/message/fusion/mptspi.c b/trunk/drivers/message/fusion/mptspi.c
index c5b808fd55ba..61620144e49c 100644
--- a/trunk/drivers/message/fusion/mptspi.c
+++ b/trunk/drivers/message/fusion/mptspi.c
@@ -300,7 +300,7 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
 	flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE |
 		(IOCPage4Ptr->Header.PageLength + ii) * 4;
 
-	ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
+	mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
 
 	ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 		"writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n",
@@ -614,24 +614,19 @@ static void mptspi_read_parameters(struct scsi_target *starget)
 	spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0;
 }
 
-int
+static int
 mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
 {
-	MPT_ADAPTER	*ioc = hd->ioc;
 	MpiRaidActionRequest_t	*pReq;
 	MPT_FRAME_HDR		*mf;
-	int			ret;
-	unsigned long		timeleft;
-
-	mutex_lock(&ioc->internal_cmds.mutex);
+	MPT_ADAPTER *ioc = hd->ioc;
 
 	/* Get and Populate a free Frame
 	 */
 	if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
-		dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT
-			"%s: no msg frames!\n", ioc->name, __func__));
-		ret = -EAGAIN;
-		goto out;
+		ddvprintk(ioc, printk(MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n",
+				ioc->name));
+		return -EAGAIN;
 	}
 	pReq = (MpiRaidActionRequest_t *)mf;
 	if (quiesce)
@@ -648,36 +643,29 @@ mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
 	pReq->Reserved2 = 0;
 	pReq->ActionDataWord = 0; /* Reserved for this action */
 
-	ioc->add_sge((char *)&pReq->ActionDataSGE,
+	mpt_add_sge((char *)&pReq->ActionDataSGE,
 		MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
 
 	ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n",
 			ioc->name, pReq->Action, channel, id));
 
-	INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
+	hd->pLocal = NULL;
+	hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */
+	hd->scandv_wait_done = 0;
+
+	/* Save cmd pointer, for resource free if timeout or
+	 * FW reload occurs
+	 */
+	hd->cmdPtr = mf;
+
+	add_timer(&hd->timer);
 	mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
-	timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done, 10*HZ);
-	if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
-		ret = -ETIME;
-		dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: TIMED OUT!\n",
-		    ioc->name, __func__));
-		if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
-			goto out;
-		if (!timeleft) {
-			printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
-			    ioc->name, __func__);
-			mpt_HardResetHandler(ioc, CAN_SLEEP);
-			mpt_free_msg_frame(ioc, mf);
-		}
-		goto out;
-	}
+	wait_event(hd->scandv_waitq, hd->scandv_wait_done);
 
-	ret = ioc->internal_cmds.completion_code;
+	if ((hd->pLocal == NULL) || (hd->pLocal->completion != 0))
+		return -1;
 
- out:
-	CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
-	mutex_unlock(&ioc->internal_cmds.mutex);
-	return ret;
+	return 0;
 }
 
 static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
@@ -1435,15 +1423,17 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	 * A slightly different algorithm is required for
 	 * 64bit SGEs.
 	 */
-	scale = ioc->req_sz/ioc->SGE_size;
-	if (ioc->sg_addr_size == sizeof(u64)) {
+	scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
+	if (sizeof(dma_addr_t) == sizeof(u64)) {
 		numSGE = (scale - 1) *
 		  (ioc->facts.MaxChainDepth-1) + scale +
-		  (ioc->req_sz - 60) / ioc->SGE_size;
+		  (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
+		  sizeof(u32));
 	} else {
 		numSGE = 1 + (scale - 1) *
 		  (ioc->facts.MaxChainDepth-1) + scale +
-		  (ioc->req_sz - 64) / ioc->SGE_size;
+		  (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
+		  sizeof(u32));
 	}
 
 	if (numSGE < sh->sg_tablesize) {
@@ -1474,6 +1464,9 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	/* Clear the TM flags
 	 */
+	hd->tmPending = 0;
+	hd->tmState = TM_STATE_NONE;
+	hd->resetPending = 0;
 	hd->abortSCpnt = NULL;
 
 	/* Clear the pointer used to store
@@ -1500,6 +1493,8 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		mpt_saf_te));
 	ioc->spi_data.noQas = 0;
 
+	init_waitqueue_head(&hd->scandv_waitq);
+	hd->scandv_wait_done = 0;
 	hd->last_queue_full = 0;
 	hd->spi_pending = 0;
 
@@ -1519,7 +1514,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	 * issue internal bus reset
 	 */
 	if (ioc->spi_data.bus_reset)
-		mptscsih_IssueTaskMgmt(hd,
+		mptscsih_TMHandler(hd,
 		    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
 		    0, 0, 0, 0, 5);
 
diff --git a/trunk/drivers/net/Kconfig b/trunk/drivers/net/Kconfig
index f3c4a3b910bb..214a92d1ef75 100644
--- a/trunk/drivers/net/Kconfig
+++ b/trunk/drivers/net/Kconfig
@@ -2264,17 +2264,6 @@ config BNX2
 	  To compile this driver as a module, choose M here: the module
 	  will be called bnx2.  This is recommended.
 
-config CNIC
-	tristate "Broadcom CNIC support"
-	depends on BNX2
-	depends on UIO
-	help
-	  This driver supports offload features of Broadcom NetXtremeII
-	  gigabit Ethernet cards.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called cnic.  This is recommended.
-
 config SPIDER_NET
 	tristate "Spider Gigabit Ethernet driver"
 	depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
diff --git a/trunk/drivers/net/Makefile b/trunk/drivers/net/Makefile
index db30ebd7b262..a1c25cb4669f 100644
--- a/trunk/drivers/net/Makefile
+++ b/trunk/drivers/net/Makefile
@@ -73,7 +73,6 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_TIGON3) += tg3.o
 obj-$(CONFIG_BNX2) += bnx2.o
-obj-$(CONFIG_CNIC) += cnic.o
 obj-$(CONFIG_BNX2X) += bnx2x.o
 bnx2x-objs := bnx2x_main.o bnx2x_link.o
 spidernet-y += spider_net.o spider_net_ethtool.o
diff --git a/trunk/drivers/net/bnx2.c b/trunk/drivers/net/bnx2.c
index 3f5fcb0156a1..b0cb29d4cc01 100644
--- a/trunk/drivers/net/bnx2.c
+++ b/trunk/drivers/net/bnx2.c
@@ -49,10 +49,6 @@
 #include
 #include
-#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
-#define BCM_CNIC 1
-#include "cnic_if.h"
-#endif
 #include "bnx2.h"
 #include "bnx2_fw.h"
@@ -319,158 +315,6 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
 	spin_unlock_bh(&bp->indirect_lock);
 }
 
-#ifdef BCM_CNIC
-static int
-bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
-{
-	struct bnx2 *bp = netdev_priv(dev);
-	struct drv_ctl_io *io = &info->data.io;
-
-	switch (info->cmd) {
-	case DRV_CTL_IO_WR_CMD:
-		bnx2_reg_wr_ind(bp, io->offset, io->data);
-		break;
-	case DRV_CTL_IO_RD_CMD:
-		io->data = bnx2_reg_rd_ind(bp, io->offset);
-		break;
-	case DRV_CTL_CTX_WR_CMD:
-		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
-		break;
-	default:
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
-{
-	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
-	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
-	int sb_id;
-
-	if (bp->flags & BNX2_FLAG_USING_MSIX) {
-		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
-		bnapi->cnic_present = 0;
-		sb_id = bp->irq_nvecs;
-		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
-	} else {
-		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
-		bnapi->cnic_tag = bnapi->last_status_idx;
-		bnapi->cnic_present = 1;
-		sb_id = 0;
-		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
-	}
-
-	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
-	cp->irq_arr[0].status_blk = (void *)
-		((unsigned long) bnapi->status_blk.msi +
-		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
-	cp->irq_arr[0].status_blk_num = sb_id;
-	cp->num_irq = 1;
-}
-
-static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
-			      void *data)
-{
-	struct bnx2 *bp = netdev_priv(dev);
-	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
-
-	if (ops == NULL)
-		return -EINVAL;
-
-	if (cp->drv_state & CNIC_DRV_STATE_REGD)
-		return -EBUSY;
-
-	bp->cnic_data = data;
-	rcu_assign_pointer(bp->cnic_ops, ops);
-
-	cp->num_irq = 0;
-	cp->drv_state = CNIC_DRV_STATE_REGD;
-
-	bnx2_setup_cnic_irq_info(bp);
-
-	return 0;
-}
-
-static int bnx2_unregister_cnic(struct net_device *dev)
-{
-	struct bnx2 *bp = netdev_priv(dev);
-	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
-	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
-
-	cp->drv_state = 0;
-	bnapi->cnic_present = 0;
-	rcu_assign_pointer(bp->cnic_ops, NULL);
-	synchronize_rcu();
-	return 0;
-}
-
-struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
-{
-	struct bnx2 *bp = netdev_priv(dev);
-	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
-
-	cp->drv_owner = THIS_MODULE;
-	cp->chip_id = bp->chip_id;
-	cp->pdev = bp->pdev;
-	cp->io_base = bp->regview;
-	cp->drv_ctl = bnx2_drv_ctl;
-	cp->drv_register_cnic = bnx2_register_cnic;
-	cp->drv_unregister_cnic = bnx2_unregister_cnic;
-
-	return cp;
-}
-EXPORT_SYMBOL(bnx2_cnic_probe);
-
-static void
-bnx2_cnic_stop(struct bnx2 *bp)
-{
-	struct cnic_ops *c_ops;
-	struct cnic_ctl_info info;
-
-	rcu_read_lock();
-	c_ops = rcu_dereference(bp->cnic_ops);
-	if (c_ops) {
-		info.cmd = CNIC_CTL_STOP_CMD;
-		c_ops->cnic_ctl(bp->cnic_data, &info);
-	}
-	rcu_read_unlock();
-}
-
-static void
-bnx2_cnic_start(struct bnx2 *bp)
-{
-	struct cnic_ops *c_ops;
-	struct cnic_ctl_info info;
-
-	rcu_read_lock();
-	c_ops = rcu_dereference(bp->cnic_ops);
-	if (c_ops) {
-		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
-			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
-
-			bnapi->cnic_tag = bnapi->last_status_idx;
-		}
-		info.cmd = CNIC_CTL_START_CMD;
-		c_ops->cnic_ctl(bp->cnic_data, &info);
-	}
-	rcu_read_unlock();
-}
-
-#else
-
-static void
-bnx2_cnic_stop(struct bnx2 *bp)
-{
-}
-
-static void
-bnx2_cnic_start(struct bnx2 *bp)
-{
-}
-
-#endif
-
 static int
 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
 {
@@ -644,7 +488,6 @@ bnx2_napi_enable(struct bnx2 *bp)
 static void
 bnx2_netif_stop(struct bnx2 *bp)
 {
-	bnx2_cnic_stop(bp);
 	bnx2_disable_int_sync(bp);
 	if (netif_running(bp->dev)) {
 		bnx2_napi_disable(bp);
@@ -661,7 +504,6 @@ bnx2_netif_start(struct bnx2 *bp)
 			netif_tx_wake_all_queues(bp->dev);
 			bnx2_napi_enable(bp);
 			bnx2_enable_int(bp);
-			bnx2_cnic_start(bp);
 		}
 	}
 }
@@ -3322,11 +3164,6 @@ bnx2_has_work(struct bnx2_napi *bnapi)
 	if (bnx2_has_fast_work(bnapi))
 		return 1;
 
-#ifdef BCM_CNIC
-	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
-		return 1;
-#endif
-
 	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
 	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
 		return 1;
@@ -3356,23 +3193,6 @@ bnx2_chk_missed_msi(struct bnx2 *bp)
 	bp->idle_chk_status_idx = bnapi->last_status_idx;
 }
 
-#ifdef BCM_CNIC
-static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
-{
-	struct cnic_ops *c_ops;
-
-	if (!bnapi->cnic_present)
-		return;
-
-	rcu_read_lock();
-	c_ops = rcu_dereference(bp->cnic_ops);
-	if (c_ops)
-		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
-						      bnapi->status_blk.msi);
-	rcu_read_unlock();
-}
-#endif
-
 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
 	struct status_block *sblk = bnapi->status_blk.msi;
@@ -3447,10 +3267,6 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
 
 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
 
-#ifdef BCM_CNIC
-		bnx2_poll_cnic(bp, bnapi);
-#endif
-
 		/* bnapi->last_status_idx is used below to tell the hw how
 		 * much work has been processed, so we must read it before
 		 * checking for more work.
@@ -4816,11 +4632,8 @@ bnx2_init_chip(struct bnx2 *bp)
 	val = REG_RD(bp, BNX2_MQ_CONFIG);
 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
 	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
-	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
-		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
-		if (CHIP_REV(bp) == CHIP_REV_Ax)
-			val |= BNX2_MQ_CONFIG_HALT_DIS;
-	}
+	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
+		val |= BNX2_MQ_CONFIG_HALT_DIS;
 
 	REG_WR(bp, BNX2_MQ_CONFIG, val);
 
@@ -7658,7 +7471,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
-	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
+	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
 	dev->mem_end = dev->mem_start + mem_len;
 	dev->irq = pdev->irq;
 
diff --git a/trunk/drivers/net/bnx2.h b/trunk/drivers/net/bnx2.h
index a1ff739bc9b5..5b570e17c839 100644
--- a/trunk/drivers/net/bnx2.h
+++ b/trunk/drivers/net/bnx2.h
@@ -361,9 +361,6 @@ struct l2_fhdr {
 #define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE	 (1<<28)
 
 #define BNX2_L2CTX_HOST_BDIDX				0x00000004
-#define BNX2_L2CTX_STATUSB_NUM_SHIFT			 16
-#define BNX2_L2CTX_STATUSB_NUM(sb_id)			\
-	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0)
 #define BNX2_L2CTX_HOST_BSEQ				0x00000008
 #define BNX2_L2CTX_NX_BSEQ				0x0000000c
 #define BNX2_L2CTX_NX_BDHADDR_HI			0x00000010
@@ -5903,7 +5900,6 @@ struct l2_fhdr {
 #define BNX2_RXP_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
 
 #define BNX2_RXP_SCRATCH				0x000e0000
-#define BNX2_RXP_SCRATCH_RXP_FLOOD			0x000e0024
 #define BNX2_RXP_SCRATCH_RSS_TBL_SZ			0x000e0038
 #define BNX2_RXP_SCRATCH_RSS_TBL			0x000e003c
 #define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES		 128
@@ -6682,11 +6678,6 @@ struct bnx2_napi {
 	u32			last_status_idx;
 	u32			int_num;
 
-#ifdef BCM_CNIC
-	u32			cnic_tag;
-	int			cnic_present;
-#endif
-
 	struct bnx2_rx_ring_info	rx_ring;
 	struct bnx2_tx_ring_info	tx_ring;
 };
@@ -6736,11 +6727,6 @@ struct bnx2 {
 	int		tx_ring_size;
 	u32		tx_wake_thresh;
 
-#ifdef BCM_CNIC
-	struct cnic_ops		*cnic_ops;
-	void			*cnic_data;
-#endif
-
 	/* End of fields used in the performance code paths. */
 
 	unsigned int		current_interval;
@@ -6899,10 +6885,6 @@ struct bnx2 {
 
 	u32			idle_chk_status_idx;
 
-#ifdef BCM_CNIC
-	struct cnic_eth_dev	cnic_eth_dev;
-#endif
-
 	const struct firmware	*mips_firmware;
 	const struct firmware	*rv2p_firmware;
 };
diff --git a/trunk/drivers/net/cnic.c b/trunk/drivers/net/cnic.c
deleted file mode 100644
index 8d740376bbd2..000000000000
--- a/trunk/drivers/net/cnic.c
+++ /dev/null
@@ -1,2711 +0,0 @@
-/* cnic.c: Broadcom CNIC core network driver.
- *
- * Copyright (c) 2006-2009 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
- *
- * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
- * Modified and maintained by: Michael Chan
- */
-
-#include
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define BCM_VLAN 1
-#endif
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "cnic_if.h"
-#include "bnx2.h"
-#include "cnic.h"
-#include "cnic_defs.h"
-
-#define DRV_MODULE_NAME		"cnic"
-#define PFX DRV_MODULE_NAME	": "
-
-static char version[] __devinitdata =
-	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
-
-MODULE_AUTHOR("Michael Chan and John(Zongxi) "
-	      "Chen (zongxi@broadcom.com");
-MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(CNIC_MODULE_VERSION);
-
-static LIST_HEAD(cnic_dev_list);
-static DEFINE_RWLOCK(cnic_dev_lock);
-static DEFINE_MUTEX(cnic_lock);
-
-static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
-
-static int cnic_service_bnx2(void *, void *);
-static int cnic_ctl(void *, struct cnic_ctl_info *);
-
-static struct cnic_ops cnic_bnx2_ops = {
-	.cnic_owner	= THIS_MODULE,
-	.cnic_handler	= cnic_service_bnx2,
-	.cnic_ctl	= cnic_ctl,
-};
-
-static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *);
-static void cnic_init_bnx2_tx_ring(struct cnic_dev *);
-static void cnic_init_bnx2_rx_ring(struct cnic_dev *);
-static int cnic_cm_set_pg(struct cnic_sock *);
-
-static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
-{
-	struct cnic_dev *dev = uinfo->priv;
-	struct cnic_local *cp = dev->cnic_priv;
-
-	if (!capable(CAP_NET_ADMIN))
-		return -EPERM;
-
-	if (cp->uio_dev != -1)
-		return -EBUSY;
-
-	cp->uio_dev = iminor(inode);
-
-	cnic_shutdown_bnx2_rx_ring(dev);
-
-	cnic_init_bnx2_tx_ring(dev);
-	cnic_init_bnx2_rx_ring(dev);
-
-	return 0;
-}
-
-static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
-{
-	struct cnic_dev *dev = uinfo->priv;
-	struct cnic_local *cp = dev->cnic_priv;
-
-	cp->uio_dev = -1;
-	return 0;
-}
-
-static inline void cnic_hold(struct cnic_dev *dev)
-{
-	atomic_inc(&dev->ref_count);
-}
-
-static inline void cnic_put(struct cnic_dev *dev)
-{
-	atomic_dec(&dev->ref_count);
-}
-
-static inline void csk_hold(struct cnic_sock *csk)
-{
-	atomic_inc(&csk->ref_count);
-}
-
-static inline void csk_put(struct cnic_sock *csk)
-{
-	atomic_dec(&csk->ref_count);
-}
-
-static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
-{
-	struct cnic_dev *cdev;
-
-	read_lock(&cnic_dev_lock);
-	list_for_each_entry(cdev, &cnic_dev_list, list) {
-		if (netdev == cdev->netdev) {
-			cnic_hold(cdev);
-			read_unlock(&cnic_dev_lock);
-			return cdev;
-		}
-	}
-	read_unlock(&cnic_dev_lock);
-	return NULL;
-}
-
-static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	struct cnic_eth_dev *ethdev = cp->ethdev;
-	struct drv_ctl_info info;
-	struct drv_ctl_io *io = &info.data.io;
-
-	info.cmd = DRV_CTL_CTX_WR_CMD;
-	io->cid_addr = cid_addr;
-	io->offset = off;
-	io->data = val;
-	ethdev->drv_ctl(dev->netdev, &info);
-}
-
-static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	struct cnic_eth_dev *ethdev = cp->ethdev;
-	struct drv_ctl_info info;
-	struct drv_ctl_io *io = &info.data.io;
-
-	info.cmd = DRV_CTL_IO_WR_CMD;
-	io->offset = off;
-	io->data = val;
-	ethdev->drv_ctl(dev->netdev, &info);
-}
-
-static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	struct cnic_eth_dev *ethdev = cp->ethdev;
-	struct drv_ctl_info info;
-	struct drv_ctl_io *io = &info.data.io;
-
-	info.cmd = DRV_CTL_IO_RD_CMD;
-	io->offset = off;
-	ethdev->drv_ctl(dev->netdev, &info);
-	return io->data;
-}
-
-static int cnic_in_use(struct cnic_sock *csk)
-{
-	return test_bit(SK_F_INUSE, &csk->flags);
-}
-
-static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	struct cnic_eth_dev *ethdev = cp->ethdev;
-	struct drv_ctl_info info;
-
-	info.cmd = DRV_CTL_COMPLETION_CMD;
-	info.data.comp.comp_count = count;
-	ethdev->drv_ctl(dev->netdev, &info);
-}
-
-static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
-			   struct cnic_sock *csk)
-{
-	struct iscsi_path path_req;
-	char *buf = NULL;
-	u16 len = 0;
-	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
-	struct cnic_ulp_ops *ulp_ops;
-
-	if (cp->uio_dev == -1)
-		return -ENODEV;
-
-	if (csk) {
-		len = sizeof(path_req);
-		buf = (char *) &path_req;
-		memset(&path_req, 0, len);
-
-		msg_type = ISCSI_KEVENT_PATH_REQ;
-		path_req.handle = (u64) csk->l5_cid;
-		if (test_bit(SK_F_IPV6, &csk->flags)) {
-			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
-			       sizeof(struct in6_addr));
-			path_req.ip_addr_len = 16;
-		} else {
-			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
-			       sizeof(struct in_addr));
-			path_req.ip_addr_len = 4;
-		}
-		path_req.vlan_id = csk->vlan_id;
-		path_req.pmtu = csk->mtu;
-	}
-
-	rcu_read_lock();
-	ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
-	if (ulp_ops)
-		ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
-	rcu_read_unlock();
-	return 0;
-}
-
-static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
-				  char *buf, u16 len)
-{
-	int rc = -EINVAL;
-
-	switch (msg_type) {
-	case ISCSI_UEVENT_PATH_UPDATE: {
-		struct cnic_local *cp;
-		u32 l5_cid;
-		struct cnic_sock *csk;
-		struct iscsi_path *path_resp;
-
-		if (len < sizeof(*path_resp))
-			break;
-
-		path_resp = (struct iscsi_path *) buf;
-		cp = dev->cnic_priv;
-		l5_cid = (u32) path_resp->handle;
-		if (l5_cid >= MAX_CM_SK_TBL_SZ)
-			break;
-
-		csk = &cp->csk_tbl[l5_cid];
-		csk_hold(csk);
-		if (cnic_in_use(csk)) {
-			memcpy(csk->ha, path_resp->mac_addr, 6);
-			if (test_bit(SK_F_IPV6, &csk->flags))
-				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
-				       sizeof(struct in6_addr));
-			else
-				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
-				       sizeof(struct in_addr));
-			if (is_valid_ether_addr(csk->ha))
-				cnic_cm_set_pg(csk);
-		}
-		csk_put(csk);
-		rc = 0;
-	}
-	}
-
-	return rc;
-}
-
-static int cnic_offld_prep(struct cnic_sock *csk)
-{
-	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
-		return 0;
-
-	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
-		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
-		return 0;
-	}
-
-	return 1;
-}
-
-static int cnic_close_prep(struct cnic_sock *csk)
-{
-	clear_bit(SK_F_CONNECT_START, &csk->flags);
-	smp_mb__after_clear_bit();
-
-	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
-		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
-			msleep(1);
-
-		return 1;
-	}
-	return 0;
-}
-
-static int cnic_abort_prep(struct cnic_sock *csk)
-{
-	clear_bit(SK_F_CONNECT_START, &csk->flags);
-	smp_mb__after_clear_bit();
-
-	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
-		msleep(1);
-
-	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
-		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
-		return 1;
-	}
-
-	return 0;
-}
-
-int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
-{
-	struct cnic_dev *dev;
-
-	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
-		printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
-		       ulp_type);
-		return -EINVAL;
-	}
-	mutex_lock(&cnic_lock);
-	if (cnic_ulp_tbl[ulp_type]) {
-		printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
-				    "been registered\n", ulp_type);
-		mutex_unlock(&cnic_lock);
-		return -EBUSY;
-	}
-
-	read_lock(&cnic_dev_lock);
-	list_for_each_entry(dev, &cnic_dev_list, list) {
-		struct cnic_local *cp = dev->cnic_priv;
-
-		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
-	}
-	read_unlock(&cnic_dev_lock);
-
-	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
-	mutex_unlock(&cnic_lock);
-
-	/* Prevent race conditions with netdev_event */
-	rtnl_lock();
-	read_lock(&cnic_dev_lock);
-	list_for_each_entry(dev, &cnic_dev_list, list) {
-		struct cnic_local *cp = dev->cnic_priv;
-
-		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
-			ulp_ops->cnic_init(dev);
-	}
-	read_unlock(&cnic_dev_lock);
-	rtnl_unlock();
-
-	return 0;
-}
-
-int cnic_unregister_driver(int ulp_type)
-{
-	struct cnic_dev *dev;
-
-	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
-		printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
-		       ulp_type);
-		return -EINVAL;
-	}
-	mutex_lock(&cnic_lock);
-	if (!cnic_ulp_tbl[ulp_type]) {
-		printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
-				    "been registered\n", ulp_type);
-		goto out_unlock;
-	}
-	read_lock(&cnic_dev_lock);
-	list_for_each_entry(dev, &cnic_dev_list, list) {
-		struct cnic_local *cp = dev->cnic_priv;
-
-		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
-			printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
-					    "still has devices registered\n", ulp_type);
-			read_unlock(&cnic_dev_lock);
-			goto out_unlock;
-		}
-	}
-	read_unlock(&cnic_dev_lock);
-
-	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
-
-	mutex_unlock(&cnic_lock);
-	synchronize_rcu();
-	return 0;
-
-out_unlock:
-	mutex_unlock(&cnic_lock);
-	return -EINVAL;
-}
-
-static int cnic_start_hw(struct cnic_dev *);
-static void cnic_stop_hw(struct cnic_dev *);
-
-static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
-				void *ulp_ctx)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	struct cnic_ulp_ops *ulp_ops;
-
-	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
-		printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
-		       ulp_type);
-		return -EINVAL;
-	}
-	mutex_lock(&cnic_lock);
-	if (cnic_ulp_tbl[ulp_type] == NULL) {
-		printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
-				    "has not been registered\n", ulp_type);
-		mutex_unlock(&cnic_lock);
-		return -EAGAIN;
-	}
-	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
-		printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
-				    "been registered to this device\n", ulp_type);
-		mutex_unlock(&cnic_lock);
-		return -EBUSY;
-	}
-
-	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
-	cp->ulp_handle[ulp_type] = ulp_ctx;
-	ulp_ops = cnic_ulp_tbl[ulp_type];
-	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
-	cnic_hold(dev);
-
-	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
-		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
-			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
-
-	mutex_unlock(&cnic_lock);
-
-	return 0;
-
-}
-EXPORT_SYMBOL(cnic_register_driver);
-
-static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-
-	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
-		printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
-		       ulp_type);
-		return -EINVAL;
-	}
-	mutex_lock(&cnic_lock);
-	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
-		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
-		cnic_put(dev);
-	} else {
-		printk(KERN_ERR PFX "cnic_unregister_device: device not "
-				    "registered to this ulp type %d\n", ulp_type);
-		mutex_unlock(&cnic_lock);
-		return -EINVAL;
-	}
-	mutex_unlock(&cnic_lock);
-
-	synchronize_rcu();
-
-	return 0;
-}
-EXPORT_SYMBOL(cnic_unregister_driver);
-
-static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
-{
-	id_tbl->start = start_id;
-	id_tbl->max = size;
-	id_tbl->next = 0;
-	spin_lock_init(&id_tbl->lock);
-	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
-	if (!id_tbl->table)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
-{
-	kfree(id_tbl->table);
-	id_tbl->table = NULL;
-}
-
-static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
-{
-	int ret = -1;
-
-	id -= id_tbl->start;
-	if (id >= id_tbl->max)
-		return ret;
-
-	spin_lock(&id_tbl->lock);
-	if (!test_bit(id, id_tbl->table)) {
-		set_bit(id, id_tbl->table);
-		ret = 0;
-	}
-	spin_unlock(&id_tbl->lock);
-	return ret;
-}
-
-/* Returns -1 if not successful */
-static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
-{
-	u32 id;
-
-	spin_lock(&id_tbl->lock);
-	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
-	if (id >= id_tbl->max) {
-		id = -1;
-		if (id_tbl->next != 0) {
-			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
-			if (id >= id_tbl->next)
-				id = -1;
-		}
-	}
-
-	if (id < id_tbl->max) {
-		set_bit(id, id_tbl->table);
-		id_tbl->next = (id + 1) & (id_tbl->max - 1);
-		id += id_tbl->start;
-	}
-
-	spin_unlock(&id_tbl->lock);
-
-	return id;
-}
-
-static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
-{
-	if (id == -1)
-		return;
-
-	id -= id_tbl->start;
-	if (id >= id_tbl->max)
-		return;
-
-	clear_bit(id, id_tbl->table);
-}
-
-static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
-{
-	int i;
-
-	if (!dma->pg_arr)
-		return;
-
-	for (i = 0; i < dma->num_pages; i++) {
-		if (dma->pg_arr[i]) {
-			pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
-					    dma->pg_arr[i], dma->pg_map_arr[i]);
-			dma->pg_arr[i] = NULL;
-		}
-	}
-	if (dma->pgtbl) {
-		pci_free_consistent(dev->pcidev, dma->pgtbl_size,
-				    dma->pgtbl, dma->pgtbl_map);
-		dma->pgtbl = NULL;
-	}
-	kfree(dma->pg_arr);
-	dma->pg_arr = NULL;
-	dma->num_pages = 0;
-}
-
-static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
-{
-	int i;
-	u32 *page_table = dma->pgtbl;
-
-	for (i = 0; i < dma->num_pages; i++) {
-		/* Each entry needs to be in big endian format.
-		 */
-		*page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
-		page_table++;
-		*page_table = (u32) dma->pg_map_arr[i];
-		page_table++;
-	}
-}
-
-static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
-			  int pages, int use_pg_tbl)
-{
-	int i, size;
-	struct cnic_local *cp = dev->cnic_priv;
-
-	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
-	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
-	if (dma->pg_arr == NULL)
-		return -ENOMEM;
-
-	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
-	dma->num_pages = pages;
-
-	for (i = 0; i < pages; i++) {
-		dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev,
-						      BCM_PAGE_SIZE,
-						      &dma->pg_map_arr[i]);
-		if (dma->pg_arr[i] == NULL)
-			goto error;
-	}
-	if (!use_pg_tbl)
-		return 0;
-
-	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
-			  ~(BCM_PAGE_SIZE - 1);
-	dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size,
-					  &dma->pgtbl_map);
-	if (dma->pgtbl == NULL)
-		goto error;
-
-	cp->setup_pgtbl(dev, dma);
-
-	return 0;
-
-error:
-	cnic_free_dma(dev, dma);
-	return -ENOMEM;
-}
-
-static void cnic_free_resc(struct cnic_dev *dev)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	int i = 0;
-
-	if (cp->cnic_uinfo) {
-		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
-		while (cp->uio_dev != -1 && i < 15) {
-			msleep(100);
-			i++;
-		}
-		uio_unregister_device(cp->cnic_uinfo);
-		kfree(cp->cnic_uinfo);
-		cp->cnic_uinfo = NULL;
-	}
-
-	if (cp->l2_buf) {
-		pci_free_consistent(dev->pcidev, cp->l2_buf_size,
-				    cp->l2_buf, cp->l2_buf_map);
-		cp->l2_buf = NULL;
-	}
-
-	if (cp->l2_ring) {
-		pci_free_consistent(dev->pcidev, cp->l2_ring_size,
-				    cp->l2_ring, cp->l2_ring_map);
-		cp->l2_ring = NULL;
-	}
-
-	for (i = 0; i < cp->ctx_blks; i++) {
-		if (cp->ctx_arr[i].ctx) {
-			pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
-					    cp->ctx_arr[i].ctx,
-					    cp->ctx_arr[i].mapping);
-			cp->ctx_arr[i].ctx = NULL;
-		}
-	}
-	kfree(cp->ctx_arr);
-	cp->ctx_arr = NULL;
-	cp->ctx_blks = 0;
-
-	cnic_free_dma(dev, &cp->gbl_buf_info);
-	cnic_free_dma(dev, &cp->conn_buf_info);
-	cnic_free_dma(dev, &cp->kwq_info);
-	cnic_free_dma(dev, &cp->kcq_info);
-	kfree(cp->iscsi_tbl);
-	cp->iscsi_tbl = NULL;
-	kfree(cp->ctx_tbl);
-	cp->ctx_tbl = NULL;
-
-	cnic_free_id_tbl(&cp->cid_tbl);
-}
-
-static int cnic_alloc_context(struct cnic_dev *dev)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-
-	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
-		int i, k, arr_size;
-
-		cp->ctx_blk_size = BCM_PAGE_SIZE;
-		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
-		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
-			   sizeof(struct cnic_ctx);
-		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
-		if (cp->ctx_arr == NULL)
-			return -ENOMEM;
-
-		k = 0;
-		for (i = 0; i < 2; i++) {
-			u32 j, reg, off, lo, hi;
-
-			if (i == 0)
-				off = BNX2_PG_CTX_MAP;
-			else
-				off = BNX2_ISCSI_CTX_MAP;
-
-			reg = cnic_reg_rd_ind(dev, off);
-			lo = reg >> 16;
-			hi = reg & 0xffff;
-			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
-				cp->ctx_arr[k].cid = j;
-		}
-
-		cp->ctx_blks = k;
-		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
-			cp->ctx_blks = 0;
-			return -ENOMEM;
-		}
-
-		for (i = 0; i < cp->ctx_blks; i++) {
-			cp->ctx_arr[i].ctx =
-				pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE,
-						     &cp->ctx_arr[i].mapping);
-			if (cp->ctx_arr[i].ctx == NULL)
-				return -ENOMEM;
-		}
-	}
-	return 0;
-}
-
-static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	struct uio_info *uinfo;
-	int ret;
-
-	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
-	if (ret)
-		goto error;
-	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
-
-	ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
-	if (ret)
-		goto error;
-	cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
-
-	ret = cnic_alloc_context(dev);
-	if (ret)
-		goto error;
-
-	cp->l2_ring_size = 2 * BCM_PAGE_SIZE;
-	cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size,
-					   &cp->l2_ring_map);
-	if (!cp->l2_ring)
-		goto error;
-
-	cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
-	cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
-	cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size,
-					  &cp->l2_buf_map);
-	if (!cp->l2_buf)
-		goto error;
-
-	uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
-	if (!uinfo)
-		goto error;
-
-	uinfo->mem[0].addr = dev->netdev->base_addr;
-	uinfo->mem[0].internal_addr = dev->regview;
-	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
-	uinfo->mem[0].memtype = UIO_MEM_PHYS;
-
-	uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
-	if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
-		uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
-	else
-		uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
-	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
-
-	uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
-	uinfo->mem[2].size = cp->l2_ring_size;
-	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
-
-	uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
-	uinfo->mem[3].size = cp->l2_buf_size;
-	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
-
-	uinfo->name = "bnx2_cnic";
-	uinfo->version = CNIC_MODULE_VERSION;
-	uinfo->irq = UIO_IRQ_CUSTOM;
-
-	uinfo->open = cnic_uio_open;
-	uinfo->release = cnic_uio_close;
-
-	uinfo->priv = dev;
-
-	ret = uio_register_device(&dev->pcidev->dev, uinfo);
-	if (ret) {
-		kfree(uinfo);
-		goto error;
-	}
-
-	cp->cnic_uinfo = uinfo;
-
-	return 0;
-
-error:
-	cnic_free_resc(dev);
-	return ret;
-}
-
-static inline u32 cnic_kwq_avail(struct cnic_local *cp)
-{
-	return cp->max_kwq_idx -
-		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
-}
-
-static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
-				  u32 num_wqes)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	struct kwqe *prod_qe;
-	u16 prod, sw_prod, i;
-
-	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
-		return -EAGAIN;		/* bnx2 is down */
-
-	spin_lock_bh(&cp->cnic_ulp_lock);
-	if (num_wqes > cnic_kwq_avail(cp) &&
-	    !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
-		spin_unlock_bh(&cp->cnic_ulp_lock);
-		return -EAGAIN;
-	}
-
-	cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;
-
-	prod = cp->kwq_prod_idx;
-	sw_prod = prod & MAX_KWQ_IDX;
-	for (i = 0; i < num_wqes; i++) {
-		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
-		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
-		prod++;
-		sw_prod = prod & MAX_KWQ_IDX;
-	}
-	cp->kwq_prod_idx = prod;
-
-	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
-
-	spin_unlock_bh(&cp->cnic_ulp_lock);
-	return 0;
-}
-
-static void service_kcqes(struct cnic_dev *dev, int num_cqes)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	int i, j;
-
-	i = 0;
-	j = 1;
-	while (num_cqes) {
-		struct cnic_ulp_ops *ulp_ops;
-		int ulp_type;
-		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
-		u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
-
-		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
-			cnic_kwq_completion(dev, 1);
-
-		while (j < num_cqes) {
-			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
-
-			if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
-				break;
-
-			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
-				cnic_kwq_completion(dev, 1);
-			j++;
-		}
-
-		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
-			ulp_type = CNIC_ULP_RDMA;
-		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
-			ulp_type = CNIC_ULP_ISCSI;
-		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
-			ulp_type = CNIC_ULP_L4;
-		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
-			goto end;
-		else {
-			printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n",
-			       dev->netdev->name, kcqe_op_flag);
-			goto end;
-		}
-
-		rcu_read_lock();
-		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
-		if (likely(ulp_ops)) {
-			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
-						  cp->completed_kcq + i, j);
-		}
-		rcu_read_unlock();
-end:
-		num_cqes -= j;
-		i += j;
-		j = 1;
-	}
-	return;
-}
-
-static u16 cnic_bnx2_next_idx(u16 idx)
-{
-	return idx + 1;
-}
-
-static u16 cnic_bnx2_hw_idx(u16 idx)
-{
-	return idx;
-}
-
-static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	u16 i, ri, last;
-	struct kcqe *kcqe;
-	int kcqe_cnt = 0, last_cnt = 0;
-
-	i = ri = last = *sw_prod;
-	ri &= MAX_KCQ_IDX;
-
-	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
-		kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
-		cp->completed_kcq[kcqe_cnt++] = kcqe;
-		i = cp->next_idx(i);
-		ri = i & MAX_KCQ_IDX;
-		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
-			last_cnt = kcqe_cnt;
-			last = i;
-		}
-	}
-
-	*sw_prod = last;
-	return last_cnt;
-}
-
-static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
-{
-	u16 rx_cons = *cp->rx_cons_ptr;
-	u16 tx_cons = *cp->tx_cons_ptr;
-
-	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
-		cp->tx_cons = tx_cons;
-		cp->rx_cons = rx_cons;
-		uio_event_notify(cp->cnic_uinfo);
-	}
-}
-
-static int cnic_service_bnx2(void *data, void *status_blk)
-{
-	struct cnic_dev *dev = data;
-	struct status_block *sblk = status_blk;
-	struct cnic_local *cp = dev->cnic_priv;
-	u32 status_idx = sblk->status_idx;
-	u16 hw_prod, sw_prod;
-	int kcqe_cnt;
-
-	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
-		return status_idx;
-
-	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
-
-	hw_prod = sblk->status_completion_producer_index;
-	sw_prod = cp->kcq_prod_idx;
-	while (sw_prod != hw_prod) {
-		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
-		if (kcqe_cnt == 0)
-			goto done;
-
-		service_kcqes(dev, kcqe_cnt);
-
-		/* Tell compiler that status_blk fields can change. */
-		barrier();
-		if (status_idx != sblk->status_idx) {
-			status_idx = sblk->status_idx;
-			cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
-			hw_prod = sblk->status_completion_producer_index;
-		} else
-			break;
-	}
-
-done:
-	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
-
-	cp->kcq_prod_idx = sw_prod;
-
-	cnic_chk_bnx2_pkt_rings(cp);
-	return status_idx;
-}
-
-static void cnic_service_bnx2_msix(unsigned long data)
-{
-	struct cnic_dev *dev = (struct cnic_dev *) data;
-	struct cnic_local *cp = dev->cnic_priv;
-	struct status_block_msix *status_blk = cp->bnx2_status_blk;
-	u32 status_idx = status_blk->status_idx;
-	u16 hw_prod, sw_prod;
-	int kcqe_cnt;
-
-	cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
-
-	hw_prod = status_blk->status_completion_producer_index;
-	sw_prod = cp->kcq_prod_idx;
-	while (sw_prod != hw_prod) {
-		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
-		if (kcqe_cnt == 0)
-			goto done;
-
-		service_kcqes(dev, kcqe_cnt);
-
-		/* Tell compiler that status_blk fields can change.
-		 */
-		barrier();
-		if (status_idx != status_blk->status_idx) {
-			status_idx = status_blk->status_idx;
-			cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
-			hw_prod = status_blk->status_completion_producer_index;
-		} else
-			break;
-	}
-
-done:
-	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
-	cp->kcq_prod_idx = sw_prod;
-
-	cnic_chk_bnx2_pkt_rings(cp);
-
-	cp->last_status_idx = status_idx;
-	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
-		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
-}
-
-static irqreturn_t cnic_irq(int irq, void *dev_instance)
-{
-	struct cnic_dev *dev = dev_instance;
-	struct cnic_local *cp = dev->cnic_priv;
-	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
-
-	if (cp->ack_int)
-		cp->ack_int(dev);
-
-	prefetch(cp->status_blk);
-	prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
-
-	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
-		tasklet_schedule(&cp->cnic_irq_task);
-
-	return IRQ_HANDLED;
-}
-
-static void cnic_ulp_stop(struct cnic_dev *dev)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	int if_type;
-
-	rcu_read_lock();
-	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
-		struct cnic_ulp_ops *ulp_ops;
-
-		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
-		if (!ulp_ops)
-			continue;
-
-		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
-			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
-	}
-	rcu_read_unlock();
-}
-
-static void cnic_ulp_start(struct cnic_dev *dev)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	int if_type;
-
-	rcu_read_lock();
-	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
-		struct cnic_ulp_ops *ulp_ops;
-
-		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
-		if (!ulp_ops || !ulp_ops->cnic_start)
-			continue;
-
-		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
-			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
-	}
-	rcu_read_unlock();
-}
-
-static int cnic_ctl(void *data, struct cnic_ctl_info *info)
-{
-	struct cnic_dev *dev = data;
-
-	switch (info->cmd) {
-	case CNIC_CTL_STOP_CMD:
-		cnic_hold(dev);
-		mutex_lock(&cnic_lock);
-
-		cnic_ulp_stop(dev);
-		cnic_stop_hw(dev);
-
-		mutex_unlock(&cnic_lock);
-		cnic_put(dev);
-		break;
-	case CNIC_CTL_START_CMD:
-		cnic_hold(dev);
-		mutex_lock(&cnic_lock);
-
-		if (!cnic_start_hw(dev))
-			cnic_ulp_start(dev);
-
-		mutex_unlock(&cnic_lock);
-		cnic_put(dev);
-		break;
-	default:
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static void cnic_ulp_init(struct cnic_dev *dev)
-{
-	int i;
-	struct cnic_local *cp = dev->cnic_priv;
-
-	rcu_read_lock();
-	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
-		struct cnic_ulp_ops *ulp_ops;
-
-		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
-		if (!ulp_ops || !ulp_ops->cnic_init)
-			continue;
-
-		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
-			ulp_ops->cnic_init(dev);
-
-	}
-	rcu_read_unlock();
-}
-
-static void cnic_ulp_exit(struct cnic_dev *dev)
-{
-	int i;
-	struct cnic_local *cp = dev->cnic_priv;
-
-	rcu_read_lock();
-	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
-		struct cnic_ulp_ops *ulp_ops;
-
-		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
-		if (!ulp_ops || !ulp_ops->cnic_exit)
-			continue;
-
-		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
-			ulp_ops->cnic_exit(dev);
-
-	}
-	rcu_read_unlock();
-}
-
-static int cnic_cm_offload_pg(struct cnic_sock *csk)
-{
-	struct cnic_dev *dev = csk->dev;
-	struct l4_kwq_offload_pg *l4kwqe;
-	struct kwqe *wqes[1];
-
-	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
-	memset(l4kwqe, 0, sizeof(*l4kwqe));
-	wqes[0] = (struct kwqe *) l4kwqe;
-
-	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
-	l4kwqe->flags =
-		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
-	l4kwqe->l2hdr_nbytes = ETH_HLEN;
-
-	l4kwqe->da0 = csk->ha[0];
-	l4kwqe->da1 = csk->ha[1];
-	l4kwqe->da2 = csk->ha[2];
-	l4kwqe->da3 = csk->ha[3];
-	l4kwqe->da4 = csk->ha[4];
-	l4kwqe->da5 = csk->ha[5];
-
-	l4kwqe->sa0 = dev->mac_addr[0];
-	l4kwqe->sa1 = dev->mac_addr[1];
-	l4kwqe->sa2 = dev->mac_addr[2];
-	l4kwqe->sa3 = dev->mac_addr[3];
-	l4kwqe->sa4 = dev->mac_addr[4];
-	l4kwqe->sa5 = dev->mac_addr[5];
-
-	l4kwqe->etype = ETH_P_IP;
-	l4kwqe->ipid_count = DEF_IPID_COUNT;
-	l4kwqe->host_opaque = csk->l5_cid;
-
-	if (csk->vlan_id) {
-		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
-		l4kwqe->vlan_tag = csk->vlan_id;
-		l4kwqe->l2hdr_nbytes += 4;
-	}
-
-	return dev->submit_kwqes(dev, wqes, 1);
-}
-
-static int cnic_cm_update_pg(struct cnic_sock *csk)
-{
-	struct cnic_dev *dev = csk->dev;
-	struct l4_kwq_update_pg *l4kwqe;
-	struct kwqe *wqes[1];
-
-	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
-	memset(l4kwqe, 0, sizeof(*l4kwqe));
-	wqes[0] = (struct kwqe *) l4kwqe;
-
-	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
-	l4kwqe->flags =
-		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
-	l4kwqe->pg_cid = csk->pg_cid;
-
-	l4kwqe->da0 = csk->ha[0];
-	l4kwqe->da1 = csk->ha[1];
-	l4kwqe->da2 = csk->ha[2];
-	l4kwqe->da3 = csk->ha[3];
-	l4kwqe->da4 = csk->ha[4];
-	l4kwqe->da5 = csk->ha[5];
-
-	l4kwqe->pg_host_opaque = csk->l5_cid;
-	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
-
-	return dev->submit_kwqes(dev, wqes, 1);
-}
-
-static int cnic_cm_upload_pg(struct cnic_sock *csk)
-{
-	struct cnic_dev *dev = csk->dev;
-	struct l4_kwq_upload *l4kwqe;
-	struct kwqe *wqes[1];
-
-	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
-	memset(l4kwqe, 0, sizeof(*l4kwqe));
-	wqes[0] = (struct kwqe *) l4kwqe;
-
-	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
-	l4kwqe->flags =
-		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
-	l4kwqe->cid = csk->pg_cid;
-
-	return dev->submit_kwqes(dev, wqes, 1);
-}
-
-static int cnic_cm_conn_req(struct cnic_sock *csk)
-{
-	struct cnic_dev *dev = csk->dev;
-	struct l4_kwq_connect_req1 *l4kwqe1;
-	struct l4_kwq_connect_req2 *l4kwqe2;
-	struct l4_kwq_connect_req3 *l4kwqe3;
-	struct kwqe *wqes[3];
-	u8 tcp_flags = 0;
-	int num_wqes = 2;
-
-	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
-	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
-	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
-	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
-	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
-	memset(l4kwqe3, 0, sizeof(*l4kwqe3));
-
-	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
-	l4kwqe3->flags =
-		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
-	l4kwqe3->ka_timeout = csk->ka_timeout;
-	l4kwqe3->ka_interval = csk->ka_interval;
-	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
-	l4kwqe3->tos = csk->tos;
-	l4kwqe3->ttl = csk->ttl;
-	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
-	l4kwqe3->pmtu = csk->mtu;
-	l4kwqe3->rcv_buf = csk->rcv_buf;
-	l4kwqe3->snd_buf = csk->snd_buf;
-	l4kwqe3->seed = csk->seed;
-
-	wqes[0] = (struct kwqe *) l4kwqe1;
-	if (test_bit(SK_F_IPV6, &csk->flags)) {
-		wqes[1] = (struct kwqe *) l4kwqe2;
-		wqes[2] = (struct kwqe *) l4kwqe3;
-		num_wqes = 3;
-
-		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
-		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
-		l4kwqe2->flags =
-			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
-			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
-		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
-		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
-		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
-		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
-		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
-		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
-		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
-			       sizeof(struct tcphdr);
-	} else {
-		wqes[1] = (struct kwqe *) l4kwqe3;
-		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
-			       sizeof(struct tcphdr);
-	}
-
-	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
-	l4kwqe1->flags =
-		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
-		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
-	l4kwqe1->cid = csk->cid;
-	l4kwqe1->pg_cid = csk->pg_cid;
-	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
-	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
-	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
-	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
-	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
-		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
-	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
-		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
-	if (csk->tcp_flags & SK_TCP_NAGLE)
-		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
-	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
-		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
-	if (csk->tcp_flags & SK_TCP_SACK)
-		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
-	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
-		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
-
-	l4kwqe1->tcp_flags = tcp_flags;
-
-	return dev->submit_kwqes(dev, wqes, num_wqes);
-}
-
-static int cnic_cm_close_req(struct cnic_sock *csk)
-{
-	struct cnic_dev *dev = csk->dev;
-	struct l4_kwq_close_req *l4kwqe;
-	struct kwqe *wqes[1];
-
-	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
-	memset(l4kwqe, 0, sizeof(*l4kwqe));
-	wqes[0] = (struct kwqe *) l4kwqe;
-
-	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
-	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
-	l4kwqe->cid = csk->cid;
-
-	return dev->submit_kwqes(dev, wqes, 1);
-}
-
-static int cnic_cm_abort_req(struct cnic_sock *csk)
-{
-	struct cnic_dev *dev = csk->dev;
-	struct l4_kwq_reset_req *l4kwqe;
-	struct kwqe *wqes[1];
-
-	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
-	memset(l4kwqe, 0, sizeof(*l4kwqe));
-	wqes[0] = (struct kwqe *) l4kwqe;
-
-	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
-	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
-	l4kwqe->cid = csk->cid;
-
-	return dev->submit_kwqes(dev, wqes, 1);
-}
-
-static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
-			  u32 l5_cid, struct cnic_sock **csk, void *context)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	struct cnic_sock *csk1;
-
-	if (l5_cid >= MAX_CM_SK_TBL_SZ)
-		return -EINVAL;
-
-	csk1 = &cp->csk_tbl[l5_cid];
-	if (atomic_read(&csk1->ref_count))
-		return -EAGAIN;
-
-	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
-		return -EBUSY;
-
-	csk1->dev = dev;
-	csk1->cid = cid;
-	csk1->l5_cid = l5_cid;
-	csk1->ulp_type = ulp_type;
-	csk1->context = context;
-
-	csk1->ka_timeout = DEF_KA_TIMEOUT;
-	csk1->ka_interval = DEF_KA_INTERVAL;
-	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
-	csk1->tos = DEF_TOS;
-	csk1->ttl = DEF_TTL;
-	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
-	csk1->rcv_buf = DEF_RCV_BUF;
-	csk1->snd_buf = DEF_SND_BUF;
-	csk1->seed = DEF_SEED;
-
-	*csk = csk1;
-	return 0;
-}
-
-static void cnic_cm_cleanup(struct cnic_sock *csk)
-{
-	if (csk->src_port) {
-		struct cnic_dev *dev = csk->dev;
-		struct cnic_local *cp = dev->cnic_priv;
-
-		cnic_free_id(&cp->csk_port_tbl, csk->src_port);
-		csk->src_port = 0;
-	}
-}
-
-static void cnic_close_conn(struct cnic_sock *csk)
-{
-	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
-		cnic_cm_upload_pg(csk);
-		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
-	}
-	cnic_cm_cleanup(csk);
-}
-
-static int cnic_cm_destroy(struct cnic_sock *csk)
-{
-	if (!cnic_in_use(csk))
-		return -EINVAL;
-
-	csk_hold(csk);
-	clear_bit(SK_F_INUSE, &csk->flags);
-	smp_mb__after_clear_bit();
-	while (atomic_read(&csk->ref_count) != 1)
-		msleep(1);
-	cnic_cm_cleanup(csk);
-
-	csk->flags = 0;
-	csk_put(csk);
-	return 0;
-}
-
-static inline u16 cnic_get_vlan(struct net_device *dev,
-				struct net_device **vlan_dev)
-{
-	if (dev->priv_flags & IFF_802_1Q_VLAN) {
-		*vlan_dev = vlan_dev_real_dev(dev);
-		return vlan_dev_vlan_id(dev);
-	}
-	*vlan_dev = dev;
-	return 0;
-}
-
-static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
-			     struct dst_entry **dst)
-{
-	struct flowi fl;
-	int err;
-	struct rtable *rt;
-
-	memset(&fl, 0, sizeof(fl));
-	fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
-
-	err = ip_route_output_key(&init_net, &rt, &fl);
-	if (!err)
-		*dst = &rt->u.dst;
-	return err;
-}
-
-static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
-			     struct dst_entry **dst)
-{
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-	struct flowi fl;
-
-	memset(&fl, 0, sizeof(fl));
-	ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
-	if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
-		fl.oif = dst_addr->sin6_scope_id;
-
-	*dst = ip6_route_output(&init_net, NULL, &fl);
-	if (*dst)
-		return 0;
-#endif
-
-	return -ENETUNREACH;
-}
-
-static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
-					   int ulp_type)
-{
-	struct cnic_dev *dev = NULL;
-	struct dst_entry *dst;
-	struct net_device *netdev = NULL;
-	int err = -ENETUNREACH;
-
-	if (dst_addr->sin_family == AF_INET)
-		err = cnic_get_v4_route(dst_addr, &dst);
-	else if (dst_addr->sin_family == AF_INET6) {
-		struct sockaddr_in6 *dst_addr6 =
-			(struct sockaddr_in6 *) dst_addr;
-
-		err = cnic_get_v6_route(dst_addr6, &dst);
-	} else
-		return NULL;
-
-	if (err)
-		return NULL;
-
-	if (!dst->dev)
-		goto done;
-
-	cnic_get_vlan(dst->dev, &netdev);
-
-	dev = cnic_from_netdev(netdev);
-
-done:
-	dst_release(dst);
-	if (dev)
-		cnic_put(dev);
-	return dev;
-}
-
-static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
-{
-	struct cnic_dev *dev = csk->dev;
-	struct cnic_local *cp = dev->cnic_priv;
-
-	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
-}
-
-static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
-{
-	struct cnic_dev *dev = csk->dev;
-	struct cnic_local *cp = dev->cnic_priv;
-	int is_v6, err, rc = -ENETUNREACH;
-	struct dst_entry *dst;
-	struct net_device *realdev;
-	u32 local_port;
-
-	if (saddr->local.v6.sin6_family == AF_INET6 &&
-	    saddr->remote.v6.sin6_family == AF_INET6)
-		is_v6 = 1;
-	else if (saddr->local.v4.sin_family == AF_INET &&
-		 saddr->remote.v4.sin_family == AF_INET)
-		is_v6 = 0;
-	else
-		return -EINVAL;
-
-	clear_bit(SK_F_IPV6, &csk->flags);
-
-	if (is_v6) {
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-		set_bit(SK_F_IPV6, &csk->flags);
-		err = cnic_get_v6_route(&saddr->remote.v6, &dst);
-		if (err)
-			return err;
-
-		if (!dst || dst->error || !dst->dev)
-			goto err_out;
-
-		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
-		       sizeof(struct in6_addr));
-		csk->dst_port = saddr->remote.v6.sin6_port;
-		local_port = saddr->local.v6.sin6_port;
-#else
-		return rc;
-#endif
-
-	} else {
-		err = cnic_get_v4_route(&saddr->remote.v4, &dst);
-		if (err)
-			return err;
-
-		if (!dst || dst->error || !dst->dev)
-			goto err_out;
-
-		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
-		csk->dst_port = saddr->remote.v4.sin_port;
-		local_port = saddr->local.v4.sin_port;
-	}
-
-	csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
-	if (realdev != dev->netdev)
-		goto err_out;
-
-	if (local_port >= CNIC_LOCAL_PORT_MIN &&
-	    local_port < CNIC_LOCAL_PORT_MAX) {
-		if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
-			local_port = 0;
-	} else
-		local_port = 0;
-
-	if (!local_port) {
-		local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
-		if (local_port == -1) {
-			rc = -ENOMEM;
-			goto err_out;
-		}
-	}
-	csk->src_port = local_port;
-
-	csk->mtu = dst_mtu(dst);
-	rc = 0;
-
-err_out:
-	dst_release(dst);
-	return rc;
-}
-
-static void cnic_init_csk_state(struct cnic_sock *csk)
-{
-	csk->state = 0;
-	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
-	clear_bit(SK_F_CLOSING, &csk->flags);
-}
-
-static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
-{
-	int err = 0;
-
-	if (!cnic_in_use(csk))
-		return -EINVAL;
-
-	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
-		return -EINVAL;
-
-	cnic_init_csk_state(csk);
-
-	err = cnic_get_route(csk, saddr);
-	if (err)
-		goto err_out;
-
-	err = cnic_resolve_addr(csk, saddr);
-	if (!err)
-		return 0;
-
-err_out:
-	clear_bit(SK_F_CONNECT_START, &csk->flags);
-	return err;
-}
-
-static int cnic_cm_abort(struct cnic_sock *csk)
-{
-	struct cnic_local *cp = csk->dev->cnic_priv;
-	u32 opcode;
-
-	if (!cnic_in_use(csk))
-		return -EINVAL;
-
-	if (cnic_abort_prep(csk))
-		return cnic_cm_abort_req(csk);
-
-	/* Getting here means that we haven't started connect, or
-	 * connect was not successful.
-	 */
-
-	csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
-	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
-		opcode = csk->state;
-	else
-		opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
-	cp->close_conn(csk, opcode);
-
-	return 0;
-}
-
-static int cnic_cm_close(struct cnic_sock *csk)
-{
-	if (!cnic_in_use(csk))
-		return -EINVAL;
-
-	if (cnic_close_prep(csk)) {
-		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
-		return cnic_cm_close_req(csk);
-	}
-	return 0;
-}
-
-static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
-			   u8 opcode)
-{
-	struct cnic_ulp_ops *ulp_ops;
-	int ulp_type = csk->ulp_type;
-
-	rcu_read_lock();
-	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
-	if (ulp_ops) {
-		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
-			ulp_ops->cm_connect_complete(csk);
-		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
-			ulp_ops->cm_close_complete(csk);
-		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
-			ulp_ops->cm_remote_abort(csk);
-		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
-			ulp_ops->cm_abort_complete(csk);
-		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
-			ulp_ops->cm_remote_close(csk);
-	}
-	rcu_read_unlock();
-}
-
-static int cnic_cm_set_pg(struct cnic_sock *csk)
-{
-	if (cnic_offld_prep(csk)) {
-		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
-			cnic_cm_update_pg(csk);
-		else
-			cnic_cm_offload_pg(csk);
-	}
-	return 0;
-}
-
-static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
-{
-	struct cnic_local *cp = dev->cnic_priv;
-	u32 l5_cid = kcqe->pg_host_opaque;
-	u8 opcode = kcqe->op_code;
-	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
-
-	csk_hold(csk);
-	if (!cnic_in_use(csk))
-		goto done;
-
-	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
-		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
-
goto done; - } - csk->pg_cid = kcqe->pg_cid; - set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); - cnic_cm_conn_req(csk); - -done: - csk_put(csk); -} - -static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) -{ - struct cnic_local *cp = dev->cnic_priv; - struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe; - u8 opcode = l4kcqe->op_code; - u32 l5_cid; - struct cnic_sock *csk; - - if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG || - opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { - cnic_cm_process_offld_pg(dev, l4kcqe); - return; - } - - l5_cid = l4kcqe->conn_id; - if (opcode & 0x80) - l5_cid = l4kcqe->cid; - if (l5_cid >= MAX_CM_SK_TBL_SZ) - return; - - csk = &cp->csk_tbl[l5_cid]; - csk_hold(csk); - - if (!cnic_in_use(csk)) { - csk_put(csk); - return; - } - - switch (opcode) { - case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE: - if (l4kcqe->status == 0) - set_bit(SK_F_OFFLD_COMPLETE, &csk->flags); - - smp_mb__before_clear_bit(); - clear_bit(SK_F_OFFLD_SCHED, &csk->flags); - cnic_cm_upcall(cp, csk, opcode); - break; - - case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: - if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) - csk->state = opcode; - /* fall through */ - case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: - case L4_KCQE_OPCODE_VALUE_RESET_COMP: - cp->close_conn(csk, opcode); - break; - - case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED: - cnic_cm_upcall(cp, csk, opcode); - break; - } - csk_put(csk); -} - -static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num) -{ - struct cnic_dev *dev = data; - int i; - - for (i = 0; i < num; i++) - cnic_cm_process_kcqe(dev, kcqe[i]); -} - -static struct cnic_ulp_ops cm_ulp_ops = { - .indicate_kcqes = cnic_cm_indicate_kcqe, -}; - -static void cnic_cm_free_mem(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - - kfree(cp->csk_tbl); - cp->csk_tbl = NULL; - cnic_free_id_tbl(&cp->csk_port_tbl); -} - -static int cnic_cm_alloc_mem(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - - cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ, - GFP_KERNEL); - if (!cp->csk_tbl) - return -ENOMEM; - - if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, - CNIC_LOCAL_PORT_MIN)) { - cnic_cm_free_mem(dev); - return -ENOMEM; - } - return 0; -} - -static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode) -{ - if ((opcode == csk->state) || - (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED && - csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) { - if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) - return 1; - } - return 0; -} - -static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode) -{ - struct cnic_dev *dev = csk->dev; - struct cnic_local *cp = dev->cnic_priv; - - clear_bit(SK_F_CONNECT_START, &csk->flags); - if (cnic_ready_to_close(csk, opcode)) { - cnic_close_conn(csk); - cnic_cm_upcall(cp, csk, opcode); - } -} - -static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev) -{ -} - -static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev) -{ - u32 seed; - - get_random_bytes(&seed, 4); - cnic_ctx_wr(dev, 45, 0, seed); - return 0; -} - -static int cnic_cm_open(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - int err; - - err = cnic_cm_alloc_mem(dev); - if (err) - return err; - - err = cp->start_cm(dev); - - if (err) - goto err_out; - - dev->cm_create = cnic_cm_create; - dev->cm_destroy = cnic_cm_destroy; - dev->cm_connect = cnic_cm_connect; - dev->cm_abort = cnic_cm_abort; - dev->cm_close = cnic_cm_close; - dev->cm_select_dev = cnic_cm_select_dev; - - cp->ulp_handle[CNIC_ULP_L4] = 
dev; - rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops); - return 0; - -err_out: - cnic_cm_free_mem(dev); - return err; -} - -static int cnic_cm_shutdown(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - int i; - - cp->stop_cm(dev); - - if (!cp->csk_tbl) - return 0; - - for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) { - struct cnic_sock *csk = &cp->csk_tbl[i]; - - clear_bit(SK_F_INUSE, &csk->flags); - cnic_cm_cleanup(csk); - } - cnic_cm_free_mem(dev); - - return 0; -} - -static void cnic_init_context(struct cnic_dev *dev, u32 cid) -{ - struct cnic_local *cp = dev->cnic_priv; - u32 cid_addr; - int i; - - if (CHIP_NUM(cp) == CHIP_NUM_5709) - return; - - cid_addr = GET_CID_ADDR(cid); - - for (i = 0; i < CTX_SIZE; i += 4) - cnic_ctx_wr(dev, cid_addr, i, 0); -} - -static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) -{ - struct cnic_local *cp = dev->cnic_priv; - int ret = 0, i; - u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0; - - if (CHIP_NUM(cp) != CHIP_NUM_5709) - return 0; - - for (i = 0; i < cp->ctx_blks; i++) { - int j; - u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; - u32 val; - - memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE); - - CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, - (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); - CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1, - (u64) cp->ctx_arr[i].mapping >> 32); - CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx | - BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); - for (j = 0; j < 10; j++) { - - val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL); - if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) - break; - udelay(5); - } - if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { - ret = -EBUSY; - break; - } - } - return ret; -} - -static void cnic_free_irq(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - - if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { - cp->disable_int_sync(dev); - tasklet_disable(&cp->cnic_irq_task); - free_irq(ethdev->irq_arr[0].vector, dev); - } -} - -static int cnic_init_bnx2_irq(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - - if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { - int err, i = 0; - int sblk_num = cp->status_blk_num; - u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) + - BNX2_HC_SB_CONFIG_1; - - CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT); - - CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8); - CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220); - CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220); - - cp->bnx2_status_blk = cp->status_blk; - cp->last_status_idx = cp->bnx2_status_blk->status_idx; - tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix, - (unsigned long) dev); - err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, - "cnic", dev); - if (err) { - tasklet_disable(&cp->cnic_irq_task); - return err; - } - while (cp->bnx2_status_blk->status_completion_producer_index && - i < 10) { - CNIC_WR(dev, BNX2_HC_COALESCE_NOW, - 1 << (11 + sblk_num)); - udelay(10); - i++; - barrier(); - } - if (cp->bnx2_status_blk->status_completion_producer_index) { - cnic_free_irq(dev); - goto failed; - } - - } else { - struct status_block *sblk = cp->status_blk; - u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND); - int i = 0; - - while (sblk->status_completion_producer_index && i < 10) { - CNIC_WR(dev, BNX2_HC_COMMAND, - hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); - udelay(10); - i++; - barrier(); - } - 
if (sblk->status_completion_producer_index) - goto failed; - - } - return 0; - -failed: - printk(KERN_ERR PFX "%s: " "KCQ index not resetting to 0.\n", - dev->netdev->name); - return -EBUSY; -} - -static void cnic_enable_bnx2_int(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - - if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) - return; - - CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | - BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); -} - -static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - - if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) - return; - - CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | - BNX2_PCICFG_INT_ACK_CMD_MASK_INT); - CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD); - synchronize_irq(ethdev->irq_arr[0].vector); -} - -static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - u32 cid_addr, tx_cid, sb_id; - u32 val, offset0, offset1, offset2, offset3; - int i; - struct tx_bd *txbd; - dma_addr_t buf_map; - struct status_block *s_blk = cp->status_blk; - - sb_id = cp->status_blk_num; - tx_cid = 20; - cnic_init_context(dev, tx_cid); - cnic_init_context(dev, tx_cid + 1); - cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; - if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { - struct status_block_msix *sblk = cp->status_blk; - - tx_cid = TX_TSS_CID + sb_id - 1; - cnic_init_context(dev, tx_cid); - CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | - (TX_TSS_CID << 7)); - cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; - } - cp->tx_cons = *cp->tx_cons_ptr; - - cid_addr = GET_CID_ADDR(tx_cid); - if (CHIP_NUM(cp) == CHIP_NUM_5709) { - u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40; - - for (i = 0; i < PHY_CTX_SIZE; i += 4) - cnic_ctx_wr(dev, cid_addr2, i, 0); - - offset0 = BNX2_L2CTX_TYPE_XI; - offset1 = BNX2_L2CTX_CMD_TYPE_XI; - offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; - offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; - } else { - offset0 = BNX2_L2CTX_TYPE; - offset1 = BNX2_L2CTX_CMD_TYPE; - offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; - offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; - } - val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; - cnic_ctx_wr(dev, cid_addr, offset0, val); - - val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); - cnic_ctx_wr(dev, cid_addr, offset1, val); - - txbd = (struct tx_bd *) cp->l2_ring; - - buf_map = cp->l2_buf_map; - for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) { - txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; - txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; - } - val = (u64) cp->l2_ring_map >> 32; - cnic_ctx_wr(dev, cid_addr, offset2, val); - txbd->tx_bd_haddr_hi = val; - - val = (u64) cp->l2_ring_map & 0xffffffff; - cnic_ctx_wr(dev, cid_addr, offset3, val); - txbd->tx_bd_haddr_lo = val; -} - -static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - u32 cid_addr, sb_id, val, coal_reg, coal_val; - int i; - struct rx_bd *rxbd; - struct status_block *s_blk = cp->status_blk; - - sb_id = cp->status_blk_num; - cnic_init_context(dev, 2); - cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2; - coal_reg = BNX2_HC_COMMAND; - coal_val = CNIC_RD(dev, coal_reg); - if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { - struct status_block_msix *sblk = cp->status_blk; - - cp->rx_cons_ptr = 
&sblk->status_rx_quick_consumer_index; - coal_reg = BNX2_HC_COALESCE_NOW; - coal_val = 1 << (11 + sb_id); - } - i = 0; - while (!(*cp->rx_cons_ptr != 0) && i < 10) { - CNIC_WR(dev, coal_reg, coal_val); - udelay(10); - i++; - barrier(); - } - cp->rx_cons = *cp->rx_cons_ptr; - - cid_addr = GET_CID_ADDR(2); - val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | - BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); - cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val); - - if (sb_id == 0) - val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT; - else - val = BNX2_L2CTX_STATUSB_NUM(sb_id); - cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); - - rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE); - for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { - dma_addr_t buf_map; - int n = (i % cp->l2_rx_ring_size) + 1; - - buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size); - rxbd->rx_bd_len = cp->l2_single_buf_size; - rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; - rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; - rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; - } - val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32; - cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); - rxbd->rx_bd_haddr_hi = val; - - val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff; - cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); - rxbd->rx_bd_haddr_lo = val; - - val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD); - cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2)); -} - -static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev) -{ - struct kwqe *wqes[1], l2kwqe; - - memset(&l2kwqe, 0, sizeof(l2kwqe)); - wqes[0] = &l2kwqe; - l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) | - (L2_KWQE_OPCODE_VALUE_FLUSH << - KWQE_OPCODE_SHIFT) | 2; - dev->submit_kwqes(dev, wqes, 1); -} - -static void cnic_set_bnx2_mac(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - u32 val; - - val = cp->func << 2; - - cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val); - - val = cnic_reg_rd_ind(dev, cp->shmem_base + - BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER); - dev->mac_addr[0] = (u8) (val >> 8); - dev->mac_addr[1] = (u8) val; - - CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val); - - val = cnic_reg_rd_ind(dev, cp->shmem_base + - BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER); - dev->mac_addr[2] = (u8) (val >> 24); - dev->mac_addr[3] = (u8) (val >> 16); - dev->mac_addr[4] = (u8) (val >> 8); - dev->mac_addr[5] = (u8) val; - - CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val); - - val = 4 | BNX2_RPM_SORT_USER2_BC_EN; - if (CHIP_NUM(cp) != CHIP_NUM_5709) - val |= BNX2_RPM_SORT_USER2_PROM_VLAN; - - CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0); - CNIC_WR(dev, BNX2_RPM_SORT_USER2, val); - CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA); -} - -static int cnic_start_bnx2_hw(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - struct status_block *sblk = cp->status_blk; - u32 val; - int err; - - cnic_set_bnx2_mac(dev); - - val = CNIC_RD(dev, BNX2_MQ_CONFIG); - val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; - if (BCM_PAGE_BITS > 12) - val |= (12 - 8) << 4; - else - val |= (BCM_PAGE_BITS - 8) << 4; - - CNIC_WR(dev, BNX2_MQ_CONFIG, val); - - CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8); - CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220); - CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220); - - err = cnic_setup_5709_context(dev, 1); - if (err) - return err; - - cnic_init_context(dev, KWQ_CID); - cnic_init_context(dev, KCQ_CID); - - 
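(Editorial aside, not part of the patch: a minimal, self-contained sketch of the kwqe_op_flag encoding that cnic_shutdown_bnx2_rx_ring() above relies on. Per the KWQE_* masks in cnic_if.h later in this patch, the layer code sits in bits 28-30 and the opcode in bits 16-23; the constant values below are copied from the headers shown in this diff.)

	#include <assert.h>
	#include <stdint.h>

	#define KWQE_OPCODE_MASK		0x00ff0000
	#define KWQE_OPCODE_SHIFT		16
	#define KWQE_FLAGS_LAYER_SHIFT		28
	#define L2_LAYER_CODE			2
	#define L2_KWQE_OPCODE_VALUE_FLUSH	4

	int main(void)
	{
		/* Pack a flush request the same way the driver does. */
		uint32_t op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
				   (L2_KWQE_OPCODE_VALUE_FLUSH << KWQE_OPCODE_SHIFT) | 2;

		/* KWQE_OPCODE() in cnic_if.h recovers the opcode field. */
		assert(((op_flag & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT) ==
		       L2_KWQE_OPCODE_VALUE_FLUSH);
		return 0;
	}
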
cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID); - cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX; - - cp->max_kwq_idx = MAX_KWQ_IDX; - cp->kwq_prod_idx = 0; - cp->kwq_con_idx = 0; - cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT; - - if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708) - cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15; - else - cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index; - - /* Initialize the kernel work queue context. */ - val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | - (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; - cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val); - - val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; - cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); - - val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; - cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); - - val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); - cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); - - val = (u32) cp->kwq_info.pgtbl_map; - cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); - - cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID); - cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX; - - cp->kcq_prod_idx = 0; - - /* Initialize the kernel complete queue context. */ - val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | - (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; - cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val); - - val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; - cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); - - val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; - cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); - - val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32); - cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val); - - val = (u32) cp->kcq_info.pgtbl_map; - cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val); - - cp->int_num = 0; - if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { - u32 sb_id = cp->status_blk_num; - u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id); - - cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; - cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); - cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); - } - - /* Enable Command Scheduler notification when we write to the - * host producer index of the kernel contexts. */ - CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2); - - /* Enable Command Scheduler notification when we write to either - * the Send Queue or Receive Queue producer indexes of the kernel - * bypass contexts. */ - CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7); - CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7); - - /* Notify COM when the driver posts an application buffer. */ - CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000); - - /* Set the CP and COM doorbells. These two processors poll the - * doorbell for a non-zero value before running. This must be done - * after setting up the kernel queue contexts. 
*/ - cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1); - cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1); - - cnic_init_bnx2_tx_ring(dev); - cnic_init_bnx2_rx_ring(dev); - - err = cnic_init_bnx2_irq(dev); - if (err) { - printk(KERN_ERR PFX "%s: cnic_init_irq failed\n", - dev->netdev->name); - cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); - cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); - return err; - } - - return 0; -} - -static int cnic_start_hw(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - int err; - - if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) - return -EALREADY; - - err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev); - if (err) { - printk(KERN_ERR PFX "%s: register_cnic failed\n", - dev->netdev->name); - goto err2; - } - - dev->regview = ethdev->io_base; - cp->chip_id = ethdev->chip_id; - pci_dev_get(dev->pcidev); - cp->func = PCI_FUNC(dev->pcidev->devfn); - cp->status_blk = ethdev->irq_arr[0].status_blk; - cp->status_blk_num = ethdev->irq_arr[0].status_blk_num; - - err = cp->alloc_resc(dev); - if (err) { - printk(KERN_ERR PFX "%s: allocate resource failure\n", - dev->netdev->name); - goto err1; - } - - err = cp->start_hw(dev); - if (err) - goto err1; - - err = cnic_cm_open(dev); - if (err) - goto err1; - - set_bit(CNIC_F_CNIC_UP, &dev->flags); - - cp->enable_int(dev); - - return 0; - -err1: - ethdev->drv_unregister_cnic(dev->netdev); - cp->free_resc(dev); - pci_dev_put(dev->pcidev); -err2: - return err; -} - -static void cnic_stop_bnx2_hw(struct cnic_dev *dev) -{ - struct cnic_local *cp = dev->cnic_priv; - struct cnic_eth_dev *ethdev = cp->ethdev; - - cnic_disable_bnx2_int_sync(dev); - - cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); - cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); - - cnic_init_context(dev, KWQ_CID); - cnic_init_context(dev, KCQ_CID); - - cnic_setup_5709_context(dev, 0); - cnic_free_irq(dev); - - ethdev->drv_unregister_cnic(dev->netdev); - - cnic_free_resc(dev); -} - -static void cnic_stop_hw(struct cnic_dev *dev) -{ - if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { - struct cnic_local *cp = dev->cnic_priv; - - clear_bit(CNIC_F_CNIC_UP, &dev->flags); - rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL); - synchronize_rcu(); - cnic_cm_shutdown(dev); - cp->stop_hw(dev); - pci_dev_put(dev->pcidev); - } -} - -static void cnic_free_dev(struct cnic_dev *dev) -{ - int i = 0; - - while ((atomic_read(&dev->ref_count) != 0) && i < 10) { - msleep(100); - i++; - } - if (atomic_read(&dev->ref_count) != 0) - printk(KERN_ERR PFX "%s: Failed waiting for ref count to go" - " to zero.\n", dev->netdev->name); - - printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name); - dev_put(dev->netdev); - kfree(dev); -} - -static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, - struct pci_dev *pdev) -{ - struct cnic_dev *cdev; - struct cnic_local *cp; - int alloc_size; - - alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local); - - cdev = kzalloc(alloc_size , GFP_KERNEL); - if (cdev == NULL) { - printk(KERN_ERR PFX "%s: allocate dev struct failure\n", - dev->name); - return NULL; - } - - cdev->netdev = dev; - cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev); - cdev->register_device = cnic_register_device; - cdev->unregister_device = cnic_unregister_device; - cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv; - - cp = cdev->cnic_priv; - cp->dev = cdev; - cp->uio_dev = -1; - cp->l2_single_buf_size = 0x400; - cp->l2_rx_ring_size = 3; - - spin_lock_init(&cp->cnic_ulp_lock); 
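(Editorial aside, not part of the patch: cnic_alloc_dev() above sizes a single kzalloc() for both the public struct cnic_dev and its private struct cnic_local, then points cnic_priv just past the public part. A minimal userspace sketch of that single-allocation idiom follows; the struct names are hypothetical stand-ins.)

	#include <stdlib.h>

	struct pub  { void *priv; };	/* stands in for struct cnic_dev */
	struct priv { int state; };	/* stands in for struct cnic_local */

	static struct pub *alloc_with_priv(void)
	{
		/* one zeroed allocation covers both structures */
		struct pub *p = calloc(1, sizeof(struct pub) + sizeof(struct priv));

		if (!p)
			return NULL;
		/* private area begins immediately after the public struct */
		p->priv = (char *)p + sizeof(struct pub);
		return p;
	}

	int main(void)
	{
		struct pub *p = alloc_with_priv();

		free(p);		/* one free() releases both */
		return 0;
	}

The payoff of this layout is that teardown is a single free and the private data is always cache-adjacent to its owner.
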
- - printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name); - - return cdev; -} - -static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) -{ - struct pci_dev *pdev; - struct cnic_dev *cdev; - struct cnic_local *cp; - struct cnic_eth_dev *ethdev = NULL; - struct cnic_eth_dev *(*probe)(void *) = NULL; - - probe = __symbol_get("bnx2_cnic_probe"); - if (probe) { - ethdev = (*probe)(dev); - symbol_put_addr(probe); - } - if (!ethdev) - return NULL; - - pdev = ethdev->pdev; - if (!pdev) - return NULL; - - dev_hold(dev); - pci_dev_get(pdev); - if (pdev->device == PCI_DEVICE_ID_NX2_5709 || - pdev->device == PCI_DEVICE_ID_NX2_5709S) { - u8 rev; - - pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); - if (rev < 0x10) { - pci_dev_put(pdev); - goto cnic_err; - } - } - pci_dev_put(pdev); - - cdev = cnic_alloc_dev(dev, pdev); - if (cdev == NULL) - goto cnic_err; - - set_bit(CNIC_F_BNX2_CLASS, &cdev->flags); - cdev->submit_kwqes = cnic_submit_bnx2_kwqes; - - cp = cdev->cnic_priv; - cp->ethdev = ethdev; - cdev->pcidev = pdev; - - cp->cnic_ops = &cnic_bnx2_ops; - cp->start_hw = cnic_start_bnx2_hw; - cp->stop_hw = cnic_stop_bnx2_hw; - cp->setup_pgtbl = cnic_setup_page_tbl; - cp->alloc_resc = cnic_alloc_bnx2_resc; - cp->free_resc = cnic_free_resc; - cp->start_cm = cnic_cm_init_bnx2_hw; - cp->stop_cm = cnic_cm_stop_bnx2_hw; - cp->enable_int = cnic_enable_bnx2_int; - cp->disable_int_sync = cnic_disable_bnx2_int_sync; - cp->close_conn = cnic_close_bnx2_conn; - cp->next_idx = cnic_bnx2_next_idx; - cp->hw_idx = cnic_bnx2_hw_idx; - return cdev; - -cnic_err: - dev_put(dev); - return NULL; -} - -static struct cnic_dev *is_cnic_dev(struct net_device *dev) -{ - struct ethtool_drvinfo drvinfo; - struct cnic_dev *cdev = NULL; - - if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) { - memset(&drvinfo, 0, sizeof(drvinfo)); - dev->ethtool_ops->get_drvinfo(dev, &drvinfo); - - if (!strcmp(drvinfo.driver, "bnx2")) - cdev = init_bnx2_cnic(dev); - if (cdev) { - write_lock(&cnic_dev_lock); - list_add(&cdev->list, &cnic_dev_list); - write_unlock(&cnic_dev_lock); - } - } - return cdev; -} - -/** - * netdev event handler - */ -static int cnic_netdev_event(struct notifier_block *this, unsigned long event, - void *ptr) -{ - struct net_device *netdev = ptr; - struct cnic_dev *dev; - int if_type; - int new_dev = 0; - - dev = cnic_from_netdev(netdev); - - if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) { - /* Check for the hot-plug device */ - dev = is_cnic_dev(netdev); - if (dev) { - new_dev = 1; - cnic_hold(dev); - } - } - if (dev) { - struct cnic_local *cp = dev->cnic_priv; - - if (new_dev) - cnic_ulp_init(dev); - else if (event == NETDEV_UNREGISTER) - cnic_ulp_exit(dev); - else if (event == NETDEV_UP) { - mutex_lock(&cnic_lock); - if (!cnic_start_hw(dev)) - cnic_ulp_start(dev); - mutex_unlock(&cnic_lock); - } - - rcu_read_lock(); - for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { - struct cnic_ulp_ops *ulp_ops; - void *ctx; - - ulp_ops = rcu_dereference(cp->ulp_ops[if_type]); - if (!ulp_ops || !ulp_ops->indicate_netevent) - continue; - - ctx = cp->ulp_handle[if_type]; - - ulp_ops->indicate_netevent(ctx, event); - } - rcu_read_unlock(); - - if (event == NETDEV_GOING_DOWN) { - mutex_lock(&cnic_lock); - cnic_ulp_stop(dev); - cnic_stop_hw(dev); - mutex_unlock(&cnic_lock); - } else if (event == NETDEV_UNREGISTER) { - write_lock(&cnic_dev_lock); - list_del_init(&dev->list); - write_unlock(&cnic_dev_lock); - - cnic_put(dev); - cnic_free_dev(dev); - goto done; - } - cnic_put(dev); - } -done: - return 
NOTIFY_DONE; -} - -static struct notifier_block cnic_netdev_notifier = { - .notifier_call = cnic_netdev_event -}; - -static void cnic_release(void) -{ - struct cnic_dev *dev; - - while (!list_empty(&cnic_dev_list)) { - dev = list_entry(cnic_dev_list.next, struct cnic_dev, list); - if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { - cnic_ulp_stop(dev); - cnic_stop_hw(dev); - } - - cnic_ulp_exit(dev); - list_del_init(&dev->list); - cnic_free_dev(dev); - } -} - -static int __init cnic_init(void) -{ - int rc = 0; - - printk(KERN_INFO "%s", version); - - rc = register_netdevice_notifier(&cnic_netdev_notifier); - if (rc) { - cnic_release(); - return rc; - } - - return 0; -} - -static void __exit cnic_exit(void) -{ - unregister_netdevice_notifier(&cnic_netdev_notifier); - cnic_release(); - return; -} - -module_init(cnic_init); -module_exit(cnic_exit); diff --git a/trunk/drivers/net/cnic.h b/trunk/drivers/net/cnic.h deleted file mode 100644 index 5192d4a9df5a..000000000000 --- a/trunk/drivers/net/cnic.h +++ /dev/null @@ -1,299 +0,0 @@ -/* cnic.h: Broadcom CNIC core network driver. - * - * Copyright (c) 2006-2009 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - */ - - -#ifndef CNIC_H -#define CNIC_H - -#define KWQ_PAGE_CNT 4 -#define KCQ_PAGE_CNT 16 - -#define KWQ_CID 24 -#define KCQ_CID 25 - -/* - * krnlq_context definition - */ -#define L5_KRNLQ_FLAGS 0x00000000 -#define L5_KRNLQ_SIZE 0x00000000 -#define L5_KRNLQ_TYPE 0x00000000 -#define KRNLQ_FLAGS_PG_SZ (0xf<<0) -#define KRNLQ_FLAGS_PG_SZ_256 (0<<0) -#define KRNLQ_FLAGS_PG_SZ_512 (1<<0) -#define KRNLQ_FLAGS_PG_SZ_1K (2<<0) -#define KRNLQ_FLAGS_PG_SZ_2K (3<<0) -#define KRNLQ_FLAGS_PG_SZ_4K (4<<0) -#define KRNLQ_FLAGS_PG_SZ_8K (5<<0) -#define KRNLQ_FLAGS_PG_SZ_16K (6<<0) -#define KRNLQ_FLAGS_PG_SZ_32K (7<<0) -#define KRNLQ_FLAGS_PG_SZ_64K (8<<0) -#define KRNLQ_FLAGS_PG_SZ_128K (9<<0) -#define KRNLQ_FLAGS_PG_SZ_256K (10<<0) -#define KRNLQ_FLAGS_PG_SZ_512K (11<<0) -#define KRNLQ_FLAGS_PG_SZ_1M (12<<0) -#define KRNLQ_FLAGS_PG_SZ_2M (13<<0) -#define KRNLQ_FLAGS_QE_SELF_SEQ (1<<15) -#define KRNLQ_SIZE_TYPE_SIZE ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16) -#define KRNLQ_TYPE_TYPE (0xf<<28) -#define KRNLQ_TYPE_TYPE_EMPTY (0<<28) -#define KRNLQ_TYPE_TYPE_KRNLQ (6<<28) - -#define L5_KRNLQ_HOST_QIDX 0x00000004 -#define L5_KRNLQ_HOST_FW_QIDX 0x00000008 -#define L5_KRNLQ_NX_QE_SELF_SEQ 0x0000000c -#define L5_KRNLQ_QE_SELF_SEQ_MAX 0x0000000c -#define L5_KRNLQ_NX_QE_HADDR_HI 0x00000010 -#define L5_KRNLQ_NX_QE_HADDR_LO 0x00000014 -#define L5_KRNLQ_PGTBL_PGIDX 0x00000018 -#define L5_KRNLQ_NX_PG_QIDX 0x00000018 -#define L5_KRNLQ_PGTBL_NPAGES 0x0000001c -#define L5_KRNLQ_QIDX_INCR 0x0000001c -#define L5_KRNLQ_PGTBL_HADDR_HI 0x00000020 -#define L5_KRNLQ_PGTBL_HADDR_LO 0x00000024 - -#define BNX2_PG_CTX_MAP 0x1a0034 -#define BNX2_ISCSI_CTX_MAP 0x1a0074 - -struct cnic_redirect_entry { - struct dst_entry *old_dst; - struct dst_entry *new_dst; -}; - -#define MAX_COMPLETED_KCQE 64 - -#define MAX_CNIC_L5_CONTEXT 256 - -#define MAX_CM_SK_TBL_SZ MAX_CNIC_L5_CONTEXT - -#define MAX_ISCSI_TBL_SZ 256 - -#define CNIC_LOCAL_PORT_MIN 60000 -#define CNIC_LOCAL_PORT_MAX 61000 -#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN) - -#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe)) -#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe)) -#define MAX_KWQE_CNT (KWQE_CNT - 1) -#define MAX_KCQE_CNT (KCQE_CNT - 
1) - -#define MAX_KWQ_IDX ((KWQ_PAGE_CNT * KWQE_CNT) - 1) -#define MAX_KCQ_IDX ((KCQ_PAGE_CNT * KCQE_CNT) - 1) - -#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5)) -#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT) - -#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5)) -#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT) - -#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) == \ - (MAX_KCQE_CNT - 1)) ? \ - (x) + 2 : (x) + 1 - -#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp) -#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp) -#define BNX2X_KWQ_DATA(cp, x) \ - &(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)] - -#define DEF_IPID_COUNT 0xc001 - -#define DEF_KA_TIMEOUT 10000 -#define DEF_KA_INTERVAL 300000 -#define DEF_KA_MAX_PROBE_COUNT 3 -#define DEF_TOS 0 -#define DEF_TTL 0xfe -#define DEF_SND_SEQ_SCALE 0 -#define DEF_RCV_BUF 0xffff -#define DEF_SND_BUF 0xffff -#define DEF_SEED 0 -#define DEF_MAX_RT_TIME 500 -#define DEF_MAX_DA_COUNT 2 -#define DEF_SWS_TIMER 1000 -#define DEF_MAX_CWND 0xffff - -struct cnic_ctx { - u32 cid; - void *ctx; - dma_addr_t mapping; -}; - -#define BNX2_MAX_CID 0x2000 - -struct cnic_dma { - int num_pages; - void **pg_arr; - dma_addr_t *pg_map_arr; - int pgtbl_size; - u32 *pgtbl; - dma_addr_t pgtbl_map; -}; - -struct cnic_id_tbl { - spinlock_t lock; - u32 start; - u32 max; - u32 next; - unsigned long *table; -}; - -#define CNIC_KWQ16_DATA_SIZE 128 - -struct kwqe_16_data { - u8 data[CNIC_KWQ16_DATA_SIZE]; -}; - -struct cnic_iscsi { - struct cnic_dma task_array_info; - struct cnic_dma r2tq_info; - struct cnic_dma hq_info; -}; - -struct cnic_context { - u32 cid; - struct kwqe_16_data *kwqe_data; - dma_addr_t kwqe_data_mapping; - wait_queue_head_t waitq; - int wait_cond; - unsigned long timestamp; - u32 ctx_flags; -#define CTX_FL_OFFLD_START 0x00000001 - u8 ulp_proto_id; - union { - struct cnic_iscsi *iscsi; - } proto; -}; - -struct cnic_local { - - spinlock_t cnic_ulp_lock; - void *ulp_handle[MAX_CNIC_ULP_TYPE]; - unsigned long ulp_flags[MAX_CNIC_ULP_TYPE]; -#define ULP_F_INIT 0 -#define ULP_F_START 1 - struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE]; - - /* protected by ulp_lock */ - u32 cnic_local_flags; -#define CNIC_LCL_FL_KWQ_INIT 0x00000001 - - struct cnic_dev *dev; - - struct cnic_eth_dev *ethdev; - - void *l2_ring; - dma_addr_t l2_ring_map; - int l2_ring_size; - int l2_rx_ring_size; - - void *l2_buf; - dma_addr_t l2_buf_map; - int l2_buf_size; - int l2_single_buf_size; - - u16 *rx_cons_ptr; - u16 *tx_cons_ptr; - u16 rx_cons; - u16 tx_cons; - - u32 kwq_cid_addr; - u32 kcq_cid_addr; - - struct cnic_dma kwq_info; - struct kwqe **kwq; - - struct cnic_dma kwq_16_data_info; - - u16 max_kwq_idx; - - u16 kwq_prod_idx; - u32 kwq_io_addr; - - u16 *kwq_con_idx_ptr; - u16 kwq_con_idx; - - struct cnic_dma kcq_info; - struct kcqe **kcq; - - u16 kcq_prod_idx; - u32 kcq_io_addr; - - void *status_blk; - struct status_block_msix *bnx2_status_blk; - struct host_status_block *bnx2x_status_blk; - - u32 status_blk_num; - u32 int_num; - u32 last_status_idx; - struct tasklet_struct cnic_irq_task; - - struct kcqe *completed_kcq[MAX_COMPLETED_KCQE]; - - struct cnic_sock *csk_tbl; - struct cnic_id_tbl csk_port_tbl; - - struct cnic_dma conn_buf_info; - struct cnic_dma gbl_buf_info; - - struct cnic_iscsi *iscsi_tbl; - struct cnic_context *ctx_tbl; - struct cnic_id_tbl cid_tbl; - int max_iscsi_conn; - atomic_t iscsi_conn; - - /* per connection parameters */ - int num_iscsi_tasks; - int num_ccells; - int task_array_size; - int 
r2tq_size; - int hq_size; - int num_cqs; - - struct cnic_ctx *ctx_arr; - int ctx_blks; - int ctx_blk_size; - int cids_per_blk; - - u32 chip_id; - int func; - u32 shmem_base; - - u32 uio_dev; - struct uio_info *cnic_uinfo; - - struct cnic_ops *cnic_ops; - int (*start_hw)(struct cnic_dev *); - void (*stop_hw)(struct cnic_dev *); - void (*setup_pgtbl)(struct cnic_dev *, - struct cnic_dma *); - int (*alloc_resc)(struct cnic_dev *); - void (*free_resc)(struct cnic_dev *); - int (*start_cm)(struct cnic_dev *); - void (*stop_cm)(struct cnic_dev *); - void (*enable_int)(struct cnic_dev *); - void (*disable_int_sync)(struct cnic_dev *); - void (*ack_int)(struct cnic_dev *); - void (*close_conn)(struct cnic_sock *, u32 opcode); - u16 (*next_idx)(u16); - u16 (*hw_idx)(u16); -}; - -struct bnx2x_bd_chain_next { - u32 addr_lo; - u32 addr_hi; - u8 reserved[8]; -}; - -#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN) -#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT) - -#define CDU_REGION_NUMBER_XCM_AG 2 -#define CDU_REGION_NUMBER_UCM_AG 4 - -#endif - diff --git a/trunk/drivers/net/cnic_defs.h b/trunk/drivers/net/cnic_defs.h deleted file mode 100644 index cee80f694457..000000000000 --- a/trunk/drivers/net/cnic_defs.h +++ /dev/null @@ -1,580 +0,0 @@ - -/* cnic.c: Broadcom CNIC core network driver. - * - * Copyright (c) 2006-2009 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - */ - -#ifndef CNIC_DEFS_H -#define CNIC_DEFS_H - -/* KWQ (kernel work queue) request op codes */ -#define L2_KWQE_OPCODE_VALUE_FLUSH (4) - -#define L4_KWQE_OPCODE_VALUE_CONNECT1 (50) -#define L4_KWQE_OPCODE_VALUE_CONNECT2 (51) -#define L4_KWQE_OPCODE_VALUE_CONNECT3 (52) -#define L4_KWQE_OPCODE_VALUE_RESET (53) -#define L4_KWQE_OPCODE_VALUE_CLOSE (54) -#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET (60) -#define L4_KWQE_OPCODE_VALUE_INIT_ULP (61) - -#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG (1) -#define L4_KWQE_OPCODE_VALUE_UPDATE_PG (9) -#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG (14) - -#define L5CM_RAMROD_CMD_ID_BASE (0x80) -#define L5CM_RAMROD_CMD_ID_TCP_CONNECT (L5CM_RAMROD_CMD_ID_BASE + 3) -#define L5CM_RAMROD_CMD_ID_CLOSE (L5CM_RAMROD_CMD_ID_BASE + 12) -#define L5CM_RAMROD_CMD_ID_ABORT (L5CM_RAMROD_CMD_ID_BASE + 13) -#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14) -#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15) - -/* KCQ (kernel completion queue) response op codes */ -#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53) -#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54) -#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE (55) -#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE (56) -#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED (57) -#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED (58) -#define L4_KCQE_OPCODE_VALUE_INIT_ULP (61) - -#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG (1) -#define L4_KCQE_OPCODE_VALUE_UPDATE_PG (9) -#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG (14) - -/* KCQ (kernel completion queue) completion status */ -#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0) -#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93) - -#define L4_LAYER_CODE (4) -#define L2_LAYER_CODE (2) - -/* - * L4 KCQ CQE - */ -struct l4_kcq { - u32 cid; - u32 pg_cid; - u32 conn_id; - u32 pg_host_opaque; -#if defined(__BIG_ENDIAN) - u16 status; - u16 reserved1; -#elif defined(__LITTLE_ENDIAN) - u16 reserved1; - u16 status; -#endif - u32 reserved2[2]; -#if 
defined(__BIG_ENDIAN) - u8 flags; -#define L4_KCQ_RESERVED3 (0x7<<0) -#define L4_KCQ_RESERVED3_SHIFT 0 -#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */ -#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3 -#define L4_KCQ_LAYER_CODE (0x7<<4) -#define L4_KCQ_LAYER_CODE_SHIFT 4 -#define L4_KCQ_RESERVED4 (0x1<<7) -#define L4_KCQ_RESERVED4_SHIFT 7 - u8 op_code; - u16 qe_self_seq; -#elif defined(__LITTLE_ENDIAN) - u16 qe_self_seq; - u8 op_code; - u8 flags; -#define L4_KCQ_RESERVED3 (0xF<<0) -#define L4_KCQ_RESERVED3_SHIFT 0 -#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */ -#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3 -#define L4_KCQ_LAYER_CODE (0x7<<4) -#define L4_KCQ_LAYER_CODE_SHIFT 4 -#define L4_KCQ_RESERVED4 (0x1<<7) -#define L4_KCQ_RESERVED4_SHIFT 7 -#endif -}; - - -/* - * L4 KCQ CQE PG upload - */ -struct l4_kcq_upload_pg { - u32 pg_cid; -#if defined(__BIG_ENDIAN) - u16 pg_status; - u16 pg_ipid_count; -#elif defined(__LITTLE_ENDIAN) - u16 pg_ipid_count; - u16 pg_status; -#endif - u32 reserved1[5]; -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0) -#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0 -#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4) -#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4 -#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7) -#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7 - u8 op_code; - u16 qe_self_seq; -#elif defined(__LITTLE_ENDIAN) - u16 qe_self_seq; - u8 op_code; - u8 flags; -#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0) -#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0 -#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4) -#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4 -#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7) -#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7 -#endif -}; - - -/* - * Gracefully close the connection request - */ -struct l4_kwq_close_req { -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0) -#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0 -#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4) -#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4 -#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7 - u8 op_code; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 op_code; - u8 flags; -#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0) -#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0 -#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4) -#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4 -#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7 -#endif - u32 cid; - u32 reserved2[6]; -}; - - -/* - * The first request to be passed in order to establish connection in option2 - */ -struct l4_kwq_connect_req1 { -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0) -#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0 -#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4) -#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4 -#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7 - u8 op_code; - u8 reserved0; - u8 conn_flags; -#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0) -#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0 -#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1) -#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1 -#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2) -#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2 -#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3) -#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3 -#elif defined(__LITTLE_ENDIAN) - u8 conn_flags; -#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE 
(0x1<<0) -#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0 -#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1) -#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1 -#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2) -#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2 -#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3) -#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3 - u8 reserved0; - u8 op_code; - u8 flags; -#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0) -#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0 -#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4) -#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4 -#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7 -#endif - u32 cid; - u32 pg_cid; - u32 src_ip; - u32 dst_ip; -#if defined(__BIG_ENDIAN) - u16 dst_port; - u16 src_port; -#elif defined(__LITTLE_ENDIAN) - u16 src_port; - u16 dst_port; -#endif -#if defined(__BIG_ENDIAN) - u8 rsrv1[3]; - u8 tcp_flags; -#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0) -#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0 -#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1) -#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1 -#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2) -#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2 -#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3) -#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3 -#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4) -#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4 -#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5) -#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5 -#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6) -#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6 -#elif defined(__LITTLE_ENDIAN) - u8 tcp_flags; -#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0) -#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0 -#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1) -#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1 -#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2) -#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2 -#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3) -#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3 -#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4) -#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4 -#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5) -#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5 -#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6) -#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6 - u8 rsrv1[3]; -#endif - u32 rsrv2; -}; - - -/* - * The second ( optional )request to be passed in order to establish - * connection in option2 - for IPv6 only - */ -struct l4_kwq_connect_req2 { -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0) -#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0 -#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4) -#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4 -#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7 - u8 op_code; - u8 reserved0; - u8 rsrv; -#elif defined(__LITTLE_ENDIAN) - u8 rsrv; - u8 reserved0; - u8 op_code; - u8 flags; -#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0) -#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0 -#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4) -#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4 -#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7 -#endif - u32 reserved2; - u32 src_ip_v6_2; - u32 src_ip_v6_3; - u32 src_ip_v6_4; - u32 dst_ip_v6_2; - u32 dst_ip_v6_3; - u32 dst_ip_v6_4; -}; - - -/* - * The third ( and last )request to be passed in order to establish - * connection in option2 - */ -struct 
l4_kwq_connect_req3 { -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0) -#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0 -#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4) -#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4 -#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7 - u8 op_code; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 op_code; - u8 flags; -#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0) -#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0 -#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4) -#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4 -#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7 -#endif - u32 ka_timeout; - u32 ka_interval ; -#if defined(__BIG_ENDIAN) - u8 snd_seq_scale; - u8 ttl; - u8 tos; - u8 ka_max_probe_count; -#elif defined(__LITTLE_ENDIAN) - u8 ka_max_probe_count; - u8 tos; - u8 ttl; - u8 snd_seq_scale; -#endif -#if defined(__BIG_ENDIAN) - u16 pmtu; - u16 mss; -#elif defined(__LITTLE_ENDIAN) - u16 mss; - u16 pmtu; -#endif - u32 rcv_buf; - u32 snd_buf; - u32 seed; -}; - - -/* - * a KWQE request to offload a PG connection - */ -struct l4_kwq_offload_pg { -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0) -#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0 -#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4) -#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4 -#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7 - u8 op_code; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 op_code; - u8 flags; -#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0) -#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0 -#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4) -#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4 -#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7 -#endif -#if defined(__BIG_ENDIAN) - u8 l2hdr_nbytes; - u8 pg_flags; -#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0) -#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0 -#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1) -#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1 -#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2) -#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2 - u8 da0; - u8 da1; -#elif defined(__LITTLE_ENDIAN) - u8 da1; - u8 da0; - u8 pg_flags; -#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0) -#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0 -#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1) -#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1 -#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2) -#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2 - u8 l2hdr_nbytes; -#endif -#if defined(__BIG_ENDIAN) - u8 da2; - u8 da3; - u8 da4; - u8 da5; -#elif defined(__LITTLE_ENDIAN) - u8 da5; - u8 da4; - u8 da3; - u8 da2; -#endif -#if defined(__BIG_ENDIAN) - u8 sa0; - u8 sa1; - u8 sa2; - u8 sa3; -#elif defined(__LITTLE_ENDIAN) - u8 sa3; - u8 sa2; - u8 sa1; - u8 sa0; -#endif -#if defined(__BIG_ENDIAN) - u8 sa4; - u8 sa5; - u16 etype; -#elif defined(__LITTLE_ENDIAN) - u16 etype; - u8 sa5; - u8 sa4; -#endif -#if defined(__BIG_ENDIAN) - u16 vlan_tag; - u16 ipid_start; -#elif defined(__LITTLE_ENDIAN) - u16 ipid_start; - u16 vlan_tag; -#endif -#if defined(__BIG_ENDIAN) - u16 ipid_count; - u16 reserved3; -#elif defined(__LITTLE_ENDIAN) - u16 reserved3; - u16 ipid_count; -#endif - u32 host_opaque; -}; - - -/* - * Abortively close the connection request - */ -struct l4_kwq_reset_req { -#if 
defined(__BIG_ENDIAN) - u8 flags; -#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0) -#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0 -#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4) -#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4 -#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7 - u8 op_code; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 op_code; - u8 flags; -#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0) -#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0 -#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4) -#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4 -#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7 -#endif - u32 cid; - u32 reserved2[6]; -}; - - -/* - * a KWQE request to update a PG connection - */ -struct l4_kwq_update_pg { -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0) -#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0 -#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4) -#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4 -#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7 - u8 opcode; - u16 oper16; -#elif defined(__LITTLE_ENDIAN) - u16 oper16; - u8 opcode; - u8 flags; -#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0) -#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0 -#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4) -#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4 -#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7 -#endif - u32 pg_cid; - u32 pg_host_opaque; -#if defined(__BIG_ENDIAN) - u8 pg_valids; -#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0) -#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0 -#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1) -#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1 -#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2) -#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2 - u8 pg_unused_a; - u16 pg_ipid_count; -#elif defined(__LITTLE_ENDIAN) - u16 pg_ipid_count; - u8 pg_unused_a; - u8 pg_valids; -#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0) -#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0 -#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1) -#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1 -#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2) -#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2 -#endif -#if defined(__BIG_ENDIAN) - u16 reserverd3; - u8 da0; - u8 da1; -#elif defined(__LITTLE_ENDIAN) - u8 da1; - u8 da0; - u16 reserverd3; -#endif -#if defined(__BIG_ENDIAN) - u8 da2; - u8 da3; - u8 da4; - u8 da5; -#elif defined(__LITTLE_ENDIAN) - u8 da5; - u8 da4; - u8 da3; - u8 da2; -#endif - u32 reserved4; - u32 reserved5; -}; - - -/* - * a KWQE request to upload a PG or L4 context - */ -struct l4_kwq_upload { -#if defined(__BIG_ENDIAN) - u8 flags; -#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0) -#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0 -#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4) -#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4 -#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7 - u8 opcode; - u16 oper16; -#elif defined(__LITTLE_ENDIAN) - u16 oper16; - u8 opcode; - u8 flags; -#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0) -#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0 -#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4) -#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4 -#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7) -#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7 -#endif - u32 cid; - u32 reserved2[6]; -}; - -#endif /* CNIC_DEFS_H */ diff --git a/trunk/drivers/net/cnic_if.h b/trunk/drivers/net/cnic_if.h 
deleted file mode 100644 index 06380963a34e..000000000000 --- a/trunk/drivers/net/cnic_if.h +++ /dev/null @@ -1,299 +0,0 @@ -/* cnic_if.h: Broadcom CNIC core network driver. - * - * Copyright (c) 2006 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - */ - - -#ifndef CNIC_IF_H -#define CNIC_IF_H - -#define CNIC_MODULE_VERSION "2.0.0" -#define CNIC_MODULE_RELDATE "May 21, 2009" - -#define CNIC_ULP_RDMA 0 -#define CNIC_ULP_ISCSI 1 -#define CNIC_ULP_L4 2 -#define MAX_CNIC_ULP_TYPE_EXT 2 -#define MAX_CNIC_ULP_TYPE 3 - -struct kwqe { - u32 kwqe_op_flag; - -#define KWQE_OPCODE_MASK 0x00ff0000 -#define KWQE_OPCODE_SHIFT 16 -#define KWQE_FLAGS_LAYER_SHIFT 28 -#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT) - - u32 kwqe_info0; - u32 kwqe_info1; - u32 kwqe_info2; - u32 kwqe_info3; - u32 kwqe_info4; - u32 kwqe_info5; - u32 kwqe_info6; -}; - -struct kwqe_16 { - u32 kwqe_info0; - u32 kwqe_info1; - u32 kwqe_info2; - u32 kwqe_info3; -}; - -struct kcqe { - u32 kcqe_info0; - u32 kcqe_info1; - u32 kcqe_info2; - u32 kcqe_info3; - u32 kcqe_info4; - u32 kcqe_info5; - u32 kcqe_info6; - u32 kcqe_op_flag; - #define KCQE_RAMROD_COMPLETION (0x1<<27) /* Everest */ - #define KCQE_FLAGS_LAYER_MASK (0x7<<28) - #define KCQE_FLAGS_LAYER_MASK_MISC (0<<28) - #define KCQE_FLAGS_LAYER_MASK_L2 (2<<28) - #define KCQE_FLAGS_LAYER_MASK_L3 (3<<28) - #define KCQE_FLAGS_LAYER_MASK_L4 (4<<28) - #define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28) - #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28) - #define KCQE_FLAGS_NEXT (1<<31) - #define KCQE_FLAGS_OPCODE_MASK (0xff<<16) - #define KCQE_FLAGS_OPCODE_SHIFT (16) - #define KCQE_OPCODE(op) \ - (((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT) -}; - -#define MAX_CNIC_CTL_DATA 64 -#define MAX_DRV_CTL_DATA 64 - -#define CNIC_CTL_STOP_CMD 1 -#define CNIC_CTL_START_CMD 2 -#define CNIC_CTL_COMPLETION_CMD 3 - -#define DRV_CTL_IO_WR_CMD 0x101 -#define DRV_CTL_IO_RD_CMD 0x102 -#define DRV_CTL_CTX_WR_CMD 0x103 -#define DRV_CTL_CTXTBL_WR_CMD 0x104 -#define DRV_CTL_COMPLETION_CMD 0x105 - -struct cnic_ctl_completion { - u32 cid; -}; - -struct drv_ctl_completion { - u32 comp_count; -}; - -struct cnic_ctl_info { - int cmd; - union { - struct cnic_ctl_completion comp; - char bytes[MAX_CNIC_CTL_DATA]; - } data; -}; - -struct drv_ctl_io { - u32 cid_addr; - u32 offset; - u32 data; - dma_addr_t dma_addr; -}; - -struct drv_ctl_info { - int cmd; - union { - struct drv_ctl_completion comp; - struct drv_ctl_io io; - char bytes[MAX_DRV_CTL_DATA]; - } data; -}; - -struct cnic_ops { - struct module *cnic_owner; - /* Calls to these functions are protected by RCU. When - * unregistering, we wait for any calls to complete before - * continuing. 
- */ - int (*cnic_handler)(void *, void *); - int (*cnic_ctl)(void *, struct cnic_ctl_info *); -}; - -#define MAX_CNIC_VEC 8 - -struct cnic_irq { - unsigned int vector; - void *status_blk; - u32 status_blk_num; - u32 irq_flags; -#define CNIC_IRQ_FL_MSIX 0x00000001 -}; - -struct cnic_eth_dev { - struct module *drv_owner; - u32 drv_state; -#define CNIC_DRV_STATE_REGD 0x00000001 -#define CNIC_DRV_STATE_USING_MSIX 0x00000002 - u32 chip_id; - u32 max_kwqe_pending; - struct pci_dev *pdev; - void __iomem *io_base; - - u32 ctx_tbl_offset; - u32 ctx_tbl_len; - int ctx_blk_size; - u32 starting_cid; - u32 max_iscsi_conn; - u32 max_fcoe_conn; - u32 max_rdma_conn; - u32 reserved0[2]; - - int num_irq; - struct cnic_irq irq_arr[MAX_CNIC_VEC]; - int (*drv_register_cnic)(struct net_device *, - struct cnic_ops *, void *); - int (*drv_unregister_cnic)(struct net_device *); - int (*drv_submit_kwqes_32)(struct net_device *, - struct kwqe *[], u32); - int (*drv_submit_kwqes_16)(struct net_device *, - struct kwqe_16 *[], u32); - int (*drv_ctl)(struct net_device *, struct drv_ctl_info *); - unsigned long reserved1[2]; -}; - -struct cnic_sockaddr { - union { - struct sockaddr_in v4; - struct sockaddr_in6 v6; - } local; - union { - struct sockaddr_in v4; - struct sockaddr_in6 v6; - } remote; -}; - -struct cnic_sock { - struct cnic_dev *dev; - void *context; - u32 src_ip[4]; - u32 dst_ip[4]; - u16 src_port; - u16 dst_port; - u16 vlan_id; - unsigned char old_ha[6]; - unsigned char ha[6]; - u32 mtu; - u32 cid; - u32 l5_cid; - u32 pg_cid; - int ulp_type; - - u32 ka_timeout; - u32 ka_interval; - u8 ka_max_probe_count; - u8 tos; - u8 ttl; - u8 snd_seq_scale; - u32 rcv_buf; - u32 snd_buf; - u32 seed; - - unsigned long tcp_flags; -#define SK_TCP_NO_DELAY_ACK 0x1 -#define SK_TCP_KEEP_ALIVE 0x2 -#define SK_TCP_NAGLE 0x4 -#define SK_TCP_TIMESTAMP 0x8 -#define SK_TCP_SACK 0x10 -#define SK_TCP_SEG_SCALING 0x20 - unsigned long flags; -#define SK_F_INUSE 0 -#define SK_F_OFFLD_COMPLETE 1 -#define SK_F_OFFLD_SCHED 2 -#define SK_F_PG_OFFLD_COMPLETE 3 -#define SK_F_CONNECT_START 4 -#define SK_F_IPV6 5 -#define SK_F_CLOSING 7 - - atomic_t ref_count; - u32 state; - struct kwqe kwqe1; - struct kwqe kwqe2; - struct kwqe kwqe3; -}; - -struct cnic_dev { - struct net_device *netdev; - struct pci_dev *pcidev; - void __iomem *regview; - struct list_head list; - - int (*register_device)(struct cnic_dev *dev, int ulp_type, - void *ulp_ctx); - int (*unregister_device)(struct cnic_dev *dev, int ulp_type); - int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[], - u32 num_wqes); - int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[], - u32 num_wqes); - - int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **, - void *); - int (*cm_destroy)(struct cnic_sock *); - int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *); - int (*cm_abort)(struct cnic_sock *); - int (*cm_close)(struct cnic_sock *); - struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type); - int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type, - char *data, u16 data_size); - unsigned long flags; -#define CNIC_F_CNIC_UP 1 -#define CNIC_F_BNX2_CLASS 3 -#define CNIC_F_BNX2X_CLASS 4 - atomic_t ref_count; - u8 mac_addr[6]; - - int max_iscsi_conn; - int max_fcoe_conn; - int max_rdma_conn; - - void *cnic_priv; -}; - -#define CNIC_WR(dev, off, val) writel(val, dev->regview + off) -#define CNIC_WR16(dev, off, val) writew(val, dev->regview + off) -#define CNIC_WR8(dev, off, val) writeb(val, dev->regview + off) -#define 
CNIC_RD(dev, off) readl(dev->regview + off) -#define CNIC_RD16(dev, off) readw(dev->regview + off) - -struct cnic_ulp_ops { - /* Calls to these functions are protected by RCU. When - * unregistering, we wait for any calls to complete before - * continuing. - */ - - void (*cnic_init)(struct cnic_dev *dev); - void (*cnic_exit)(struct cnic_dev *dev); - void (*cnic_start)(void *ulp_ctx); - void (*cnic_stop)(void *ulp_ctx); - void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[], - u32 num_cqes); - void (*indicate_netevent)(void *ulp_ctx, unsigned long event); - void (*cm_connect_complete)(struct cnic_sock *); - void (*cm_close_complete)(struct cnic_sock *); - void (*cm_abort_complete)(struct cnic_sock *); - void (*cm_remote_close)(struct cnic_sock *); - void (*cm_remote_abort)(struct cnic_sock *); - void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type, - char *data, u16 data_size); - struct module *owner; -}; - -extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops); - -extern int cnic_unregister_driver(int ulp_type); - -#endif diff --git a/trunk/drivers/net/virtio_net.c b/trunk/drivers/net/virtio_net.c index 7fa620ddeb21..4d1d47953fc6 100644 --- a/trunk/drivers/net/virtio_net.c +++ b/trunk/drivers/net/virtio_net.c @@ -845,10 +845,6 @@ static int virtnet_probe(struct virtio_device *vdev) int err; struct net_device *dev; struct virtnet_info *vi; - struct virtqueue *vqs[3]; - vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL}; - const char *names[] = { "input", "output", "control" }; - int nvqs; /* Allocate ourselves a network device with room for our info */ dev = alloc_etherdev(sizeof(struct virtnet_info)); @@ -909,19 +905,25 @@ static int virtnet_probe(struct virtio_device *vdev) if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) vi->mergeable_rx_bufs = true; - /* We expect two virtqueues, receive then send, - * and optionally control. */ - nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2; - - err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names); - if (err) + /* We expect two virtqueues, receive then send. */ + vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done); + if (IS_ERR(vi->rvq)) { + err = PTR_ERR(vi->rvq); goto free; + } - vi->rvq = vqs[0]; - vi->svq = vqs[1]; + vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done); + if (IS_ERR(vi->svq)) { + err = PTR_ERR(vi->svq); + goto free_recv; + } if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { - vi->cvq = vqs[2]; + vi->cvq = vdev->config->find_vq(vdev, 2, NULL); + if (IS_ERR(vi->cvq)) { + err = PTR_ERR(vi->cvq); + goto free_send; + } if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) dev->features |= NETIF_F_HW_VLAN_FILTER; @@ -939,7 +941,7 @@ static int virtnet_probe(struct virtio_device *vdev) err = register_netdev(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); - goto free_vqs; + goto free_ctrl; } /* Last of all, set up some receive buffers.
*/ @@ -960,8 +962,13 @@ static int virtnet_probe(struct virtio_device *vdev) unregister: unregister_netdev(dev); -free_vqs: - vdev->config->del_vqs(vdev); +free_ctrl: + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) + vdev->config->del_vq(vi->cvq); +free_send: + vdev->config->del_vq(vi->svq); +free_recv: + vdev->config->del_vq(vi->rvq); free: free_netdev(dev); return err; @@ -987,10 +994,12 @@ static void virtnet_remove(struct virtio_device *vdev) BUG_ON(vi->num != 0); + vdev->config->del_vq(vi->svq); + vdev->config->del_vq(vi->rvq); + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) + vdev->config->del_vq(vi->cvq); unregister_netdev(vi->dev); - vdev->config->del_vqs(vi->vdev); - while (vi->pages) __free_pages(get_a_page(vi, GFP_KERNEL), 0); diff --git a/trunk/drivers/s390/kvm/kvm_virtio.c b/trunk/drivers/s390/kvm/kvm_virtio.c index e38e5d306faf..cbc8566fab70 100644 --- a/trunk/drivers/s390/kvm/kvm_virtio.c +++ b/trunk/drivers/s390/kvm/kvm_virtio.c @@ -173,9 +173,8 @@ static void kvm_notify(struct virtqueue *vq) * this device and sets it up. */ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, - unsigned index, - void (*callback)(struct virtqueue *vq), - const char *name) + unsigned index, + void (*callback)(struct virtqueue *vq)) { struct kvm_device *kdev = to_kvmdev(vdev); struct kvm_vqconfig *config; @@ -195,7 +194,7 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN, vdev, (void *) config->address, - kvm_notify, callback, name); + kvm_notify, callback); if (!vq) { err = -ENOMEM; goto unmap; @@ -227,38 +226,6 @@ static void kvm_del_vq(struct virtqueue *vq) KVM_S390_VIRTIO_RING_ALIGN)); } -static void kvm_del_vqs(struct virtio_device *vdev) -{ - struct virtqueue *vq, *n; - - list_for_each_entry_safe(vq, n, &vdev->vqs, list) - kvm_del_vq(vq); -} - -static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char *names[]) -{ - struct kvm_device *kdev = to_kvmdev(vdev); - int i; - - /* We must have this many virtqueues. 
*/ - if (nvqs > kdev->desc->num_vq) - return -ENOENT; - - for (i = 0; i < nvqs; ++i) { - vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i]); - if (IS_ERR(vqs[i])) - goto error; - } - return 0; - -error: - kvm_del_vqs(vdev); - return PTR_ERR(vqs[i]); -} - /* * The config ops structure as defined by virtio config */ @@ -270,8 +237,8 @@ static struct virtio_config_ops kvm_vq_configspace_ops = { .get_status = kvm_get_status, .set_status = kvm_set_status, .reset = kvm_reset, - .find_vqs = kvm_find_vqs, - .del_vqs = kvm_del_vqs, + .find_vq = kvm_find_vq, + .del_vq = kvm_del_vq, }; /* diff --git a/trunk/drivers/s390/scsi/zfcp_ccw.c b/trunk/drivers/s390/scsi/zfcp_ccw.c index b2fe5cdbcaee..733fe3bf6285 100644 --- a/trunk/drivers/s390/scsi/zfcp_ccw.c +++ b/trunk/drivers/s390/scsi/zfcp_ccw.c @@ -11,24 +11,6 @@ #include "zfcp_ext.h" -#define ZFCP_MODEL_PRIV 0x4 - -static struct ccw_device_id zfcp_ccw_device_id[] = { - { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) }, - { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) }, - {}, -}; -MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id); - -/** - * zfcp_ccw_priv_sch - check if subchannel is privileged - * @adapter: Adapter/Subchannel to check - */ -int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter) -{ - return adapter->ccw_device->id.dev_model == ZFCP_MODEL_PRIV; -} - /** * zfcp_ccw_probe - probe function of zfcp driver * @ccw_device: pointer to belonging ccw device @@ -194,8 +176,8 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event) "ccnoti4", NULL); break; case CIO_BOXED: - dev_warn(&adapter->ccw_device->dev, "The FCP device " - "did not respond within the specified time\n"); + dev_warn(&adapter->ccw_device->dev, + "The ccw device did not respond in time.\n"); zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL); break; } @@ -217,6 +199,14 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev) up(&zfcp_data.config_sema); } +static struct ccw_device_id zfcp_ccw_device_id[] = { + { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) }, + { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. 
*/ + {}, +}; + +MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id); + static struct ccw_driver zfcp_ccw_driver = { .owner = THIS_MODULE, .name = "zfcp", diff --git a/trunk/drivers/s390/scsi/zfcp_dbf.c b/trunk/drivers/s390/scsi/zfcp_dbf.c index b99b87ce5a39..0a1a5dd8d018 100644 --- a/trunk/drivers/s390/scsi/zfcp_dbf.c +++ b/trunk/drivers/s390/scsi/zfcp_dbf.c @@ -163,7 +163,7 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req) } response->fsf_command = fsf_req->fsf_command; - response->fsf_reqid = fsf_req->req_id; + response->fsf_reqid = (unsigned long)fsf_req; response->fsf_seqno = fsf_req->seq_no; response->fsf_issued = fsf_req->issued; response->fsf_prot_status = qtcb->prefix.prot_status; @@ -737,7 +737,7 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req) spin_lock_irqsave(&adapter->san_dbf_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE); - r->fsf_reqid = fsf_req->req_id; + r->fsf_reqid = (unsigned long)fsf_req; r->fsf_seqno = fsf_req->seq_no; r->s_id = fc_host_port_id(adapter->scsi_host); r->d_id = wka_port->d_id; @@ -773,7 +773,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req) spin_lock_irqsave(&adapter->san_dbf_lock, flags); memset(r, 0, sizeof(*r)); strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE); - r->fsf_reqid = fsf_req->req_id; + r->fsf_reqid = (unsigned long)fsf_req; r->fsf_seqno = fsf_req->seq_no; r->s_id = wka_port->d_id; r->d_id = fc_host_port_id(adapter->scsi_host); @@ -803,7 +803,7 @@ static void zfcp_san_dbf_event_els(const char *tag, int level, spin_lock_irqsave(&adapter->san_dbf_lock, flags); memset(rec, 0, sizeof(*rec)); strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE); - rec->fsf_reqid = fsf_req->req_id; + rec->fsf_reqid = (unsigned long)fsf_req; rec->fsf_seqno = fsf_req->seq_no; rec->s_id = s_id; rec->d_id = d_id; @@ -965,7 +965,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level, ZFCP_DBF_SCSI_FCP_SNS_INFO); } - rec->fsf_reqid = fsf_req->req_id; + rec->fsf_reqid = (unsigned long)fsf_req; rec->fsf_seqno = fsf_req->seq_no; rec->fsf_issued = fsf_req->issued; } diff --git a/trunk/drivers/s390/scsi/zfcp_def.h b/trunk/drivers/s390/scsi/zfcp_def.h index 2074d45dbf6c..4c362a9069f0 100644 --- a/trunk/drivers/s390/scsi/zfcp_def.h +++ b/trunk/drivers/s390/scsi/zfcp_def.h @@ -47,6 +47,13 @@ /********************* CIO/QDIO SPECIFIC DEFINES *****************************/ +/* Adapter Identification Parameters */ +#define ZFCP_CONTROL_UNIT_TYPE 0x1731 +#define ZFCP_CONTROL_UNIT_MODEL 0x03 +#define ZFCP_DEVICE_TYPE 0x1732 +#define ZFCP_DEVICE_MODEL 0x03 +#define ZFCP_DEVICE_MODEL_PRIV 0x04 + /* DMQ bug workaround: don't use last SBALE */ #define ZFCP_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1) diff --git a/trunk/drivers/s390/scsi/zfcp_erp.c b/trunk/drivers/s390/scsi/zfcp_erp.c index e50ea465bc2b..fdc9b4352a64 100644 --- a/trunk/drivers/s390/scsi/zfcp_erp.c +++ b/trunk/drivers/s390/scsi/zfcp_erp.c @@ -880,7 +880,6 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) zfcp_port_put(port); return ZFCP_ERP_CONTINUES; } - /* fall through */ case ZFCP_ERP_STEP_NAMESERVER_LOOKUP: if (!port->d_id) return ZFCP_ERP_FAILED; @@ -895,13 +894,8 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) act->step = ZFCP_ERP_STEP_PORT_CLOSING; return ZFCP_ERP_CONTINUES; } - } - if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) { - port->d_id = 0; - _zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL); - return ZFCP_ERP_EXIT; - } /* fall through 
otherwise */ + } } return ZFCP_ERP_FAILED; } diff --git a/trunk/drivers/s390/scsi/zfcp_ext.h b/trunk/drivers/s390/scsi/zfcp_ext.h index 120a9a1c81f7..2e31b536548c 100644 --- a/trunk/drivers/s390/scsi/zfcp_ext.h +++ b/trunk/drivers/s390/scsi/zfcp_ext.h @@ -27,7 +27,6 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int); /* zfcp_ccw.c */ extern int zfcp_ccw_register(void); -extern int zfcp_ccw_priv_sch(struct zfcp_adapter *); extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *); /* zfcp_cfdc.c */ diff --git a/trunk/drivers/s390/scsi/zfcp_fc.c b/trunk/drivers/s390/scsi/zfcp_fc.c index bb2752b4130f..19ae0842047c 100644 --- a/trunk/drivers/s390/scsi/zfcp_fc.c +++ b/trunk/drivers/s390/scsi/zfcp_fc.c @@ -150,14 +150,9 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range, struct zfcp_port *port; read_lock_irqsave(&zfcp_data.config_lock, flags); - list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) { + list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) if ((port->d_id & range) == (elem->nport_did & range)) zfcp_test_link(port); - if (!port->d_id) - zfcp_erp_port_reopen(port, - ZFCP_STATUS_COMMON_ERP_FAILED, - "fcrscn1", NULL); - } read_unlock_irqrestore(&zfcp_data.config_lock, flags); } diff --git a/trunk/drivers/s390/scsi/zfcp_fsf.c b/trunk/drivers/s390/scsi/zfcp_fsf.c index e6dae3744e79..74dee32afba8 100644 --- a/trunk/drivers/s390/scsi/zfcp_fsf.c +++ b/trunk/drivers/s390/scsi/zfcp_fsf.c @@ -526,7 +526,6 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) break; case FSF_TOPO_AL: fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; - /* fall through */ default: dev_err(&adapter->ccw_device->dev, "Unknown or unsupported arbitrated loop " @@ -898,7 +897,6 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) switch (fsq->word[0]) { case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE: zfcp_test_link(unit->port); - /* fall through */ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED: req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; @@ -995,7 +993,6 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) break; case FSF_PORT_HANDLE_NOT_VALID: zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req); - /* fall through */ case FSF_GENERIC_COMMAND_REJECTED: case FSF_PAYLOAD_SIZE_MISMATCH: case FSF_REQUEST_SIZE_TOO_LARGE: @@ -1402,7 +1399,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) struct fsf_plogi *plogi; if (req->status & ZFCP_STATUS_FSFREQ_ERROR) - goto out; + return; switch (header->fsf_status) { case FSF_PORT_ALREADY_OPEN: @@ -1464,9 +1461,6 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) req->status |= ZFCP_STATUS_FSFREQ_ERROR; break; } - -out: - zfcp_port_put(port); } /** @@ -1479,7 +1473,6 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) struct qdio_buffer_element *sbale; struct zfcp_adapter *adapter = erp_action->adapter; struct zfcp_fsf_req *req; - struct zfcp_port *port = erp_action->port; int retval = -EIO; spin_lock_bh(&adapter->req_q_lock); @@ -1500,18 +1493,16 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action) sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY; req->handler = zfcp_fsf_open_port_handler; - req->qtcb->bottom.support.d_id = port->d_id; - req->data = port; + req->qtcb->bottom.support.d_id = erp_action->port->d_id; + req->data = erp_action->port; req->erp_action = erp_action; erp_action->fsf_req = req; - zfcp_port_get(port); zfcp_fsf_start_erp_timer(req); retval = zfcp_fsf_req_send(req); if (retval) { zfcp_fsf_req_free(req); 
erp_action->fsf_req = NULL; - zfcp_port_put(port); } out: spin_unlock_bh(&adapter->req_q_lock); @@ -1599,10 +1590,8 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED: dev_warn(&req->adapter->ccw_device->dev, "Opening WKA port 0x%x failed\n", wka_port->d_id); - /* fall through */ case FSF_ADAPTER_STATUS_AVAILABLE: req->status |= ZFCP_STATUS_FSFREQ_ERROR; - /* fall through */ case FSF_ACCESS_DENIED: wka_port->status = ZFCP_WKA_PORT_OFFLINE; break; @@ -1887,7 +1876,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req) if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) && (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) && - !zfcp_ccw_priv_sch(adapter)) { + (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) { exclusive = (bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE); readwrite = (bottom->lun_access_info & @@ -2325,7 +2314,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, { struct zfcp_fsf_req *req; struct fcp_cmnd_iu *fcp_cmnd_iu; - unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; + unsigned int sbtype; int real_bytes, retval = -EIO; struct zfcp_adapter *adapter = unit->port->adapter; @@ -2367,9 +2356,11 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, switch (scsi_cmnd->sc_data_direction) { case DMA_NONE: req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; + sbtype = SBAL_FLAGS0_TYPE_READ; break; case DMA_FROM_DEVICE: req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ; + sbtype = SBAL_FLAGS0_TYPE_READ; fcp_cmnd_iu->rddata = 1; break; case DMA_TO_DEVICE: @@ -2378,6 +2369,8 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, fcp_cmnd_iu->wddata = 1; break; case DMA_BIDIRECTIONAL: + default: + retval = -EIO; goto failed_scsi_cmnd; } @@ -2401,7 +2394,9 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit, scsi_sglist(scsi_cmnd), FSF_MAX_SBALS_PER_REQ); if (unlikely(real_bytes < 0)) { - if (req->sbal_number >= FSF_MAX_SBALS_PER_REQ) { + if (req->sbal_number < FSF_MAX_SBALS_PER_REQ) + retval = -EIO; + else { dev_err(&adapter->ccw_device->dev, "Oversize data package, unit 0x%016Lx " "on port 0x%016Lx closed\n", diff --git a/trunk/drivers/s390/scsi/zfcp_scsi.c b/trunk/drivers/s390/scsi/zfcp_scsi.c index 7d0da230eb63..e8fbeaeb5fbf 100644 --- a/trunk/drivers/s390/scsi/zfcp_scsi.c +++ b/trunk/drivers/s390/scsi/zfcp_scsi.c @@ -12,10 +12,6 @@ #include "zfcp_ext.h" #include -static unsigned int default_depth = 32; -module_param_named(queue_depth, default_depth, uint, 0600); -MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices"); - /* Find start of Sense Information in FCP response unit*/ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) { @@ -28,12 +24,6 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu) return fcp_sns_info_ptr; } -static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth) -{ - scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); - return sdev->queue_depth; -} - static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) { struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; @@ -44,7 +34,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) static int zfcp_scsi_slave_configure(struct scsi_device *sdp) { if (sdp->tagged_supported) - scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth); + scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, 32); else scsi_adjust_queue_depth(sdp, 0, 1); return 0; @@ -657,7 +647,6 @@ struct zfcp_data 
zfcp_data = { .name = "zfcp", .module = THIS_MODULE, .proc_name = "zfcp", - .change_queue_depth = zfcp_scsi_change_queue_depth, .slave_alloc = zfcp_scsi_slave_alloc, .slave_configure = zfcp_scsi_slave_configure, .slave_destroy = zfcp_scsi_slave_destroy, diff --git a/trunk/drivers/scsi/Kconfig b/trunk/drivers/scsi/Kconfig index 6a19ed9a1194..fb2740789b68 100644 --- a/trunk/drivers/scsi/Kconfig +++ b/trunk/drivers/scsi/Kconfig @@ -191,19 +191,20 @@ config SCSI_ENCLOSURE it has an enclosure device. Selecting this option will just allow certain enclosure conditions to be reported and is not required. +comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs" + depends on SCSI + config SCSI_MULTI_LUN bool "Probe all LUNs on each SCSI device" depends on SCSI help - Some devices support more than one LUN (Logical Unit Number) in order - to allow access to several media, e.g. CD jukebox, USB card reader, - mobile phone in mass storage mode. This option forces the kernel to - probe for all LUNs by default. This setting can be overriden by - max_luns boot/module parameter. Note that this option does not affect - devices conforming to SCSI-3 or higher as they can explicitely report - their number of LUNs. It is safe to say Y here unless you have one of - those rare devices which reacts in an unexpected way when probed for - multiple LUNs. + If you have a SCSI device that supports more than one LUN (Logical + Unit Number), e.g. a CD jukebox, and only one LUN is detected, you + can say Y here to force the SCSI driver to probe for multiple LUNs. + A SCSI device with multiple LUNs acts logically like multiple SCSI + devices. The vast majority of SCSI devices have only one LUN, and + so most people can say N here. The max_luns boot/module parameter + allows you to override this setting. config SCSI_CONSTANTS bool "Verbose SCSI error reporting (kernel size +=12K)" @@ -354,7 +355,6 @@ config ISCSI_TCP http://open-iscsi.org source "drivers/scsi/cxgb3i/Kconfig" -source "drivers/scsi/bnx2i/Kconfig" config SGIWD93_SCSI tristate "SGI WD93C93 SCSI Driver" @@ -508,7 +508,6 @@ config SCSI_AIC7XXX_OLD source "drivers/scsi/aic7xxx/Kconfig.aic79xx" source "drivers/scsi/aic94xx/Kconfig" -source "drivers/scsi/mvsas/Kconfig" config SCSI_DPT_I2O tristate "Adaptec I2O RAID support " @@ -1051,6 +1050,16 @@ config SCSI_IZIP_SLOW_CTR Generally, saying N is fine. +config SCSI_MVSAS + tristate "Marvell 88SE6440 SAS/SATA support" + depends on PCI && SCSI + select SCSI_SAS_LIBSAS + help + This driver supports Marvell SAS/SATA PCI devices. + + To compile this driver as a module, choose M here: the module + will be called mvsas.
+ config SCSI_NCR53C406A tristate "NCR53c406a SCSI support" depends on ISA && SCSI diff --git a/trunk/drivers/scsi/Makefile b/trunk/drivers/scsi/Makefile index 25429ea63d0a..a5049cfb40ed 100644 --- a/trunk/drivers/scsi/Makefile +++ b/trunk/drivers/scsi/Makefile @@ -126,10 +126,9 @@ obj-$(CONFIG_SCSI_IBMVSCSIS) += ibmvscsi/ obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/ obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o obj-$(CONFIG_SCSI_STEX) += stex.o -obj-$(CONFIG_SCSI_MVSAS) += mvsas/ +obj-$(CONFIG_SCSI_MVSAS) += mvsas.o obj-$(CONFIG_PS3_ROM) += ps3rom.o obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/ -obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/ obj-$(CONFIG_ARM) += arm/ diff --git a/trunk/drivers/scsi/NCR_D700.c b/trunk/drivers/scsi/NCR_D700.c index 1cdf09a4779a..c889d8458684 100644 --- a/trunk/drivers/scsi/NCR_D700.c +++ b/trunk/drivers/scsi/NCR_D700.c @@ -224,7 +224,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq, return ret; } -static irqreturn_t +static int NCR_D700_intr(int irq, void *data) { struct NCR_D700_private *p = (struct NCR_D700_private *)data; diff --git a/trunk/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/trunk/drivers/scsi/bnx2i/57xx_iscsi_constants.h deleted file mode 100644 index 2fceb19eb27b..000000000000 --- a/trunk/drivers/scsi/bnx2i/57xx_iscsi_constants.h +++ /dev/null @@ -1,155 +0,0 @@ -/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI - * - * Copyright (c) 2006 - 2009 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) - */ -#ifndef __57XX_ISCSI_CONSTANTS_H_ -#define __57XX_ISCSI_CONSTANTS_H_ - -/** -* This file defines HSI constants for the iSCSI flows -*/ - -/* iSCSI request op codes */ -#define ISCSI_OPCODE_CLEANUP_REQUEST (7) - -/* iSCSI response/messages op codes */ -#define ISCSI_OPCODE_CLEANUP_RESPONSE (0x27) -#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION (0) - -/* iSCSI task types */ -#define ISCSI_TASK_TYPE_READ (0) -#define ISCSI_TASK_TYPE_WRITE (1) -#define ISCSI_TASK_TYPE_MPATH (2) - -/* initial CQ sequence numbers */ -#define ISCSI_INITIAL_SN (1) - -/* KWQ (kernel work queue) layer codes */ -#define ISCSI_KWQE_LAYER_CODE (6) - -/* KWQ (kernel work queue) request op codes */ -#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0) -#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1) -#define ISCSI_KWQE_OPCODE_UPDATE_CONN (2) -#define ISCSI_KWQE_OPCODE_DESTROY_CONN (3) -#define ISCSI_KWQE_OPCODE_INIT1 (4) -#define ISCSI_KWQE_OPCODE_INIT2 (5) - -/* KCQ (kernel completion queue) response op codes */ -#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN (0x10) -#define ISCSI_KCQE_OPCODE_UPDATE_CONN (0x12) -#define ISCSI_KCQE_OPCODE_DESTROY_CONN (0x13) -#define ISCSI_KCQE_OPCODE_INIT (0x14) -#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK (0x15) -#define ISCSI_KCQE_OPCODE_TCP_RESET (0x16) -#define ISCSI_KCQE_OPCODE_TCP_SYN (0x17) -#define ISCSI_KCQE_OPCODE_TCP_FIN (0X18) -#define ISCSI_KCQE_OPCODE_TCP_ERROR (0x19) -#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20) -#define ISCSI_KCQE_OPCODE_ISCSI_ERROR (0x21) - -/* KCQ (kernel completion queue) completion status */ -#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS (0x0) -#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x1) -#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x2) -#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x3) -#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR 
(0x4) - -#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR (0x5) -#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR (0x6) - -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE (0xa) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE (0xb) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN (0xc) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT (0xd) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN (0xe) - -/* Response */ -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN (0xf) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T (0x10) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO (0x2c) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG (0x2d) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0 (0x11) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1 (0x12) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2 (0x13) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3 (0x14) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4 (0x15) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5 (0x16) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6 (0x17) - -/* Data-In */ -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN (0x18) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN (0x19) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO (0x1a) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV (0x1b) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN (0x1c) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN (0x1d) - -/* R2T */ -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF (0x1f) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN (0x20) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN (0x21) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED (0x24) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV (0x25) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN (0x26) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27) - -/* TMF */ -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN (0x28) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN (0x29) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN (0x2a) -#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP (0x2b) - -/* IP/TCP processing errors: */ -#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT (0x40) -#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS (0x41) -#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG (0x42) -#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS (0x43) - -/* iSCSI licensing errors */ -/* general iSCSI license not installed */ -#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED (0x50) -/* additional LOM specific iSCSI license not installed */ -#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED (0x51) - -/* SQ/RQ/CQ DB structure sizes */ -#define ISCSI_SQ_DB_SIZE (16) -#define ISCSI_RQ_DB_SIZE (16) -#define ISCSI_CQ_DB_SIZE (80) - -#define ISCSI_SQN_TO_NOTIFY_NOT_VALID 0xFFFF - -/* Page size codes (for flags field in connection offload request) */ -#define ISCSI_PAGE_SIZE_256 (0) -#define ISCSI_PAGE_SIZE_512 (1) -#define ISCSI_PAGE_SIZE_1K (2) -#define 
ISCSI_PAGE_SIZE_2K (3) -#define ISCSI_PAGE_SIZE_4K (4) -#define ISCSI_PAGE_SIZE_8K (5) -#define ISCSI_PAGE_SIZE_16K (6) -#define ISCSI_PAGE_SIZE_32K (7) -#define ISCSI_PAGE_SIZE_64K (8) -#define ISCSI_PAGE_SIZE_128K (9) -#define ISCSI_PAGE_SIZE_256K (10) -#define ISCSI_PAGE_SIZE_512K (11) -#define ISCSI_PAGE_SIZE_1M (12) -#define ISCSI_PAGE_SIZE_2M (13) -#define ISCSI_PAGE_SIZE_4M (14) -#define ISCSI_PAGE_SIZE_8M (15) - -/* Iscsi PDU related defines */ -#define ISCSI_HEADER_SIZE (48) -#define ISCSI_DIGEST_SHIFT (2) -#define ISCSI_DIGEST_SIZE (4) - -#define B577XX_ISCSI_CONNECTION_TYPE 3 - -#endif /*__57XX_ISCSI_CONSTANTS_H_ */ diff --git a/trunk/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/trunk/drivers/scsi/bnx2i/57xx_iscsi_hsi.h deleted file mode 100644 index 36af1afef9b6..000000000000 --- a/trunk/drivers/scsi/bnx2i/57xx_iscsi_hsi.h +++ /dev/null @@ -1,1509 +0,0 @@ -/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI. - * - * Copyright (c) 2006 - 2009 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) - */ -#ifndef __57XX_ISCSI_HSI_LINUX_LE__ -#define __57XX_ISCSI_HSI_LINUX_LE__ - -/* - * iSCSI Async CQE - */ -struct bnx2i_async_msg { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 reserved1; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 reserved1; - u8 op_code; -#endif - u32 reserved2; - u32 exp_cmd_sn; - u32 max_cmd_sn; - u32 reserved3[2]; -#if defined(__BIG_ENDIAN) - u16 reserved5; - u8 err_code; - u8 reserved4; -#elif defined(__LITTLE_ENDIAN) - u8 reserved4; - u8 err_code; - u16 reserved5; -#endif - u32 reserved6; - u32 lun[2]; -#if defined(__BIG_ENDIAN) - u8 async_event; - u8 async_vcode; - u16 param1; -#elif defined(__LITTLE_ENDIAN) - u16 param1; - u8 async_vcode; - u8 async_event; -#endif -#if defined(__BIG_ENDIAN) - u16 param2; - u16 param3; -#elif defined(__LITTLE_ENDIAN) - u16 param3; - u16 param2; -#endif - u32 reserved7[3]; - u32 cq_req_sn; -}; - - -/* - * iSCSI Buffer Descriptor (BD) - */ -struct iscsi_bd { - u32 buffer_addr_hi; - u32 buffer_addr_lo; -#if defined(__BIG_ENDIAN) - u16 reserved0; - u16 buffer_length; -#elif defined(__LITTLE_ENDIAN) - u16 buffer_length; - u16 reserved0; -#endif -#if defined(__BIG_ENDIAN) - u16 reserved3; - u16 flags; -#define ISCSI_BD_RESERVED1 (0x3F<<0) -#define ISCSI_BD_RESERVED1_SHIFT 0 -#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6) -#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6 -#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7) -#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7 -#define ISCSI_BD_RESERVED2 (0xFF<<8) -#define ISCSI_BD_RESERVED2_SHIFT 8 -#elif defined(__LITTLE_ENDIAN) - u16 flags; -#define ISCSI_BD_RESERVED1 (0x3F<<0) -#define ISCSI_BD_RESERVED1_SHIFT 0 -#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6) -#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6 -#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7) -#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7 -#define ISCSI_BD_RESERVED2 (0xFF<<8) -#define ISCSI_BD_RESERVED2_SHIFT 8 - u16 reserved3; -#endif -}; - - -/* - * iSCSI Cleanup SQ WQE - */ -struct bnx2i_cleanup_request { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 reserved1; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 reserved1; - u8 op_code; -#endif - u32 reserved2[3]; -#if defined(__BIG_ENDIAN) - u16 reserved3; - u16 itt; -#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0) -#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0 
-#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14) -#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 itt; -#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0) -#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0 -#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14) -#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14 - u16 reserved3; -#endif - u32 reserved4[10]; -#if defined(__BIG_ENDIAN) - u8 cq_index; - u8 reserved6; - u16 reserved5; -#elif defined(__LITTLE_ENDIAN) - u16 reserved5; - u8 reserved6; - u8 cq_index; -#endif -}; - - -/* - * iSCSI Cleanup CQE - */ -struct bnx2i_cleanup_response { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 status; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 status; - u8 op_code; -#endif - u32 reserved1[3]; - u32 reserved2[2]; -#if defined(__BIG_ENDIAN) - u16 reserved4; - u8 err_code; - u8 reserved3; -#elif defined(__LITTLE_ENDIAN) - u8 reserved3; - u8 err_code; - u16 reserved4; -#endif - u32 reserved5[7]; -#if defined(__BIG_ENDIAN) - u16 reserved6; - u16 itt; -#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0) -#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0 -#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14) -#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 itt; -#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0) -#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0 -#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14) -#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14 - u16 reserved6; -#endif - u32 cq_req_sn; -}; - - -/* - * SCSI read/write SQ WQE - */ -struct bnx2i_cmd_request { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 op_attr; -#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0) -#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0 -#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3) -#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3 -#define ISCSI_CMD_REQUEST_WRITE (0x1<<5) -#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5 -#define ISCSI_CMD_REQUEST_READ (0x1<<6) -#define ISCSI_CMD_REQUEST_READ_SHIFT 6 -#define ISCSI_CMD_REQUEST_FINAL (0x1<<7) -#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7 - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 op_attr; -#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0) -#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0 -#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3) -#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3 -#define ISCSI_CMD_REQUEST_WRITE (0x1<<5) -#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5 -#define ISCSI_CMD_REQUEST_READ (0x1<<6) -#define ISCSI_CMD_REQUEST_READ_SHIFT 6 -#define ISCSI_CMD_REQUEST_FINAL (0x1<<7) -#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7 - u8 op_code; -#endif -#if defined(__BIG_ENDIAN) - u16 ud_buffer_offset; - u16 sd_buffer_offset; -#elif defined(__LITTLE_ENDIAN) - u16 sd_buffer_offset; - u16 ud_buffer_offset; -#endif - u32 lun[2]; -#if defined(__BIG_ENDIAN) - u16 reserved2; - u16 itt; -#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0) -#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0 -#define ISCSI_CMD_REQUEST_TYPE (0x3<<14) -#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 itt; -#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0) -#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0 -#define ISCSI_CMD_REQUEST_TYPE (0x3<<14) -#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14 - u16 reserved2; -#endif - u32 total_data_transfer_length; - u32 cmd_sn; - u32 reserved3; - u32 cdb[4]; - u32 zero_fill; - u32 bd_list_addr_lo; - u32 bd_list_addr_hi; -#if defined(__BIG_ENDIAN) - u8 cq_index; - u8 sd_start_bd_index; - u8 ud_start_bd_index; - u8 num_bds; -#elif defined(__LITTLE_ENDIAN) - u8 num_bds; - u8 ud_start_bd_index; - u8 
sd_start_bd_index; - u8 cq_index; -#endif -}; - - -/* - * task statistics for write response - */ -struct bnx2i_write_resp_task_stat { - u32 num_data_ins; -}; - -/* - * task statistics for read response - */ -struct bnx2i_read_resp_task_stat { -#if defined(__BIG_ENDIAN) - u16 num_data_outs; - u16 num_r2ts; -#elif defined(__LITTLE_ENDIAN) - u16 num_r2ts; - u16 num_data_outs; -#endif -}; - -/* - * task statistics for iSCSI cmd response - */ -union bnx2i_cmd_resp_task_stat { - struct bnx2i_write_resp_task_stat write_stat; - struct bnx2i_read_resp_task_stat read_stat; -}; - -/* - * SCSI Command CQE - */ -struct bnx2i_cmd_response { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 response_flags; -#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0) -#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0 -#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1) -#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1 -#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2) -#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2 -#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3) -#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3 -#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4) -#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4 -#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5) -#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5 - u8 response; - u8 status; -#elif defined(__LITTLE_ENDIAN) - u8 status; - u8 response; - u8 response_flags; -#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0) -#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0 -#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1) -#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1 -#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2) -#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2 -#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3) -#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3 -#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4) -#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4 -#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5) -#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5 - u8 op_code; -#endif - u32 data_length; - u32 exp_cmd_sn; - u32 max_cmd_sn; - u32 reserved2; - u32 residual_count; -#if defined(__BIG_ENDIAN) - u16 reserved4; - u8 err_code; - u8 reserved3; -#elif defined(__LITTLE_ENDIAN) - u8 reserved3; - u8 err_code; - u16 reserved4; -#endif - u32 reserved5[5]; - union bnx2i_cmd_resp_task_stat task_stat; - u32 reserved6; -#if defined(__BIG_ENDIAN) - u16 reserved7; - u16 itt; -#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0) -#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0 -#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14) -#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 itt; -#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0) -#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0 -#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14) -#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14 - u16 reserved7; -#endif - u32 cq_req_sn; -}; - - - -/* - * firmware middle-path request SQ WQE - */ -struct bnx2i_fw_mp_request { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 op_attr; - u16 hdr_opaque1; -#elif defined(__LITTLE_ENDIAN) - u16 hdr_opaque1; - u8 op_attr; - u8 op_code; -#endif - u32 data_length; - u32 hdr_opaque2[2]; -#if defined(__BIG_ENDIAN) - u16 reserved0; - u16 itt; -#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0) -#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0 -#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14) -#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 itt; -#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0) -#define 
ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0 -#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14) -#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14 - u16 reserved0; -#endif - u32 hdr_opaque3[4]; - u32 resp_bd_list_addr_lo; - u32 resp_bd_list_addr_hi; - u32 resp_buffer; -#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) -#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 -#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24) -#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24 -#if defined(__BIG_ENDIAN) - u16 reserved4; - u8 reserved3; - u8 flags; -#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0) -#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0 -#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1) -#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1 -#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) -#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 -#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3) -#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3 -#elif defined(__LITTLE_ENDIAN) - u8 flags; -#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0) -#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0 -#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1) -#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1 -#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) -#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 -#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3) -#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3 - u8 reserved3; - u16 reserved4; -#endif - u32 bd_list_addr_lo; - u32 bd_list_addr_hi; -#if defined(__BIG_ENDIAN) - u8 cq_index; - u8 reserved6; - u8 reserved5; - u8 num_bds; -#elif defined(__LITTLE_ENDIAN) - u8 num_bds; - u8 reserved5; - u8 reserved6; - u8 cq_index; -#endif -}; - - -/* - * firmware response - CQE: used only by firmware - */ -struct bnx2i_fw_response { - u32 hdr_dword1[2]; - u32 hdr_exp_cmd_sn; - u32 hdr_max_cmd_sn; - u32 hdr_ttt; - u32 hdr_res_cnt; - u32 cqe_flags; -#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0) -#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0 -#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8) -#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8 -#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16) -#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16 - u32 stat_sn; - u32 hdr_dword2[2]; - u32 hdr_dword3[2]; - u32 task_stat; - u32 reserved0; - u32 hdr_itt; - u32 cq_req_sn; -}; - - -/* - * iSCSI KCQ CQE parameters - */ -union iscsi_kcqe_params { - u32 reserved0[4]; -}; - -/* - * iSCSI KCQ CQE - */ -struct iscsi_kcqe { - u32 iscsi_conn_id; - u32 completion_status; - u32 iscsi_conn_context_id; - union iscsi_kcqe_params params; -#if defined(__BIG_ENDIAN) - u8 flags; -#define ISCSI_KCQE_RESERVED0 (0xF<<0) -#define ISCSI_KCQE_RESERVED0_SHIFT 0 -#define ISCSI_KCQE_LAYER_CODE (0x7<<4) -#define ISCSI_KCQE_LAYER_CODE_SHIFT 4 -#define ISCSI_KCQE_RESERVED1 (0x1<<7) -#define ISCSI_KCQE_RESERVED1_SHIFT 7 - u8 op_code; - u16 qe_self_seq; -#elif defined(__LITTLE_ENDIAN) - u16 qe_self_seq; - u8 op_code; - u8 flags; -#define ISCSI_KCQE_RESERVED0 (0xF<<0) -#define ISCSI_KCQE_RESERVED0_SHIFT 0 -#define ISCSI_KCQE_LAYER_CODE (0x7<<4) -#define ISCSI_KCQE_LAYER_CODE_SHIFT 4 -#define ISCSI_KCQE_RESERVED1 (0x1<<7) -#define ISCSI_KCQE_RESERVED1_SHIFT 7 -#endif -}; - - - -/* - * iSCSI KWQE header - */ -struct iscsi_kwqe_header { -#if defined(__BIG_ENDIAN) - u8 flags; -#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0) -#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0 -#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4) -#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4 -#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7) -#define 
ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7 - u8 op_code; -#elif defined(__LITTLE_ENDIAN) - u8 op_code; - u8 flags; -#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0) -#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0 -#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4) -#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4 -#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7) -#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7 -#endif -}; - -/* - * iSCSI firmware init request 1 - */ -struct iscsi_kwqe_init1 { -#if defined(__BIG_ENDIAN) - struct iscsi_kwqe_header hdr; - u8 reserved0; - u8 num_cqs; -#elif defined(__LITTLE_ENDIAN) - u8 num_cqs; - u8 reserved0; - struct iscsi_kwqe_header hdr; -#endif - u32 dummy_buffer_addr_lo; - u32 dummy_buffer_addr_hi; -#if defined(__BIG_ENDIAN) - u16 num_ccells_per_conn; - u16 num_tasks_per_conn; -#elif defined(__LITTLE_ENDIAN) - u16 num_tasks_per_conn; - u16 num_ccells_per_conn; -#endif -#if defined(__BIG_ENDIAN) - u16 sq_wqes_per_page; - u16 sq_num_wqes; -#elif defined(__LITTLE_ENDIAN) - u16 sq_num_wqes; - u16 sq_wqes_per_page; -#endif -#if defined(__BIG_ENDIAN) - u8 cq_log_wqes_per_page; - u8 flags; -#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0) -#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0 -#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4) -#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4 -#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5) -#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5 -#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6) -#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6 - u16 cq_num_wqes; -#elif defined(__LITTLE_ENDIAN) - u16 cq_num_wqes; - u8 flags; -#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0) -#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0 -#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4) -#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4 -#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5) -#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5 -#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6) -#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6 - u8 cq_log_wqes_per_page; -#endif -#if defined(__BIG_ENDIAN) - u16 cq_num_pages; - u16 sq_num_pages; -#elif defined(__LITTLE_ENDIAN) - u16 sq_num_pages; - u16 cq_num_pages; -#endif -#if defined(__BIG_ENDIAN) - u16 rq_buffer_size; - u16 rq_num_wqes; -#elif defined(__LITTLE_ENDIAN) - u16 rq_num_wqes; - u16 rq_buffer_size; -#endif -}; - -/* - * iSCSI firmware init request 2 - */ -struct iscsi_kwqe_init2 { -#if defined(__BIG_ENDIAN) - struct iscsi_kwqe_header hdr; - u16 max_cq_sqn; -#elif defined(__LITTLE_ENDIAN) - u16 max_cq_sqn; - struct iscsi_kwqe_header hdr; -#endif - u32 error_bit_map[2]; - u32 reserved1[5]; -}; - -/* - * Initial iSCSI connection offload request 1 - */ -struct iscsi_kwqe_conn_offload1 { -#if defined(__BIG_ENDIAN) - struct iscsi_kwqe_header hdr; - u16 iscsi_conn_id; -#elif defined(__LITTLE_ENDIAN) - u16 iscsi_conn_id; - struct iscsi_kwqe_header hdr; -#endif - u32 sq_page_table_addr_lo; - u32 sq_page_table_addr_hi; - u32 cq_page_table_addr_lo; - u32 cq_page_table_addr_hi; - u32 reserved0[3]; -}; - -/* - * iSCSI Page Table Entry (PTE) - */ -struct iscsi_pte { - u32 hi; - u32 lo; -}; - -/* - * Initial iSCSI connection offload request 2 - */ -struct iscsi_kwqe_conn_offload2 { -#if defined(__BIG_ENDIAN) - struct iscsi_kwqe_header hdr; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - struct iscsi_kwqe_header hdr; -#endif - u32 rq_page_table_addr_lo; - u32 rq_page_table_addr_hi; - struct iscsi_pte sq_first_pte; - struct iscsi_pte cq_first_pte; - u32 num_additional_wqes; -}; - - -/* - * Initial iSCSI connection offload request 3 - 
*/ -struct iscsi_kwqe_conn_offload3 { -#if defined(__BIG_ENDIAN) - struct iscsi_kwqe_header hdr; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - struct iscsi_kwqe_header hdr; -#endif - u32 reserved1; - struct iscsi_pte qp_first_pte[3]; -}; - - -/* - * iSCSI connection update request - */ -struct iscsi_kwqe_conn_update { -#if defined(__BIG_ENDIAN) - struct iscsi_kwqe_header hdr; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - struct iscsi_kwqe_header hdr; -#endif -#if defined(__BIG_ENDIAN) - u8 session_error_recovery_level; - u8 max_outstanding_r2ts; - u8 reserved2; - u8 conn_flags; -#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0) -#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0 -#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1) -#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1 -#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2) -#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2 -#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3) -#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3 -#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4) -#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4 -#elif defined(__LITTLE_ENDIAN) - u8 conn_flags; -#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0) -#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0 -#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1) -#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1 -#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2) -#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2 -#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3) -#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3 -#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4) -#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4 - u8 reserved2; - u8 max_outstanding_r2ts; - u8 session_error_recovery_level; -#endif - u32 context_id; - u32 max_send_pdu_length; - u32 max_recv_pdu_length; - u32 first_burst_length; - u32 max_burst_length; - u32 exp_stat_sn; -}; - -/* - * iSCSI destroy connection request - */ -struct iscsi_kwqe_conn_destroy { -#if defined(__BIG_ENDIAN) - struct iscsi_kwqe_header hdr; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - struct iscsi_kwqe_header hdr; -#endif - u32 context_id; - u32 reserved1[6]; -}; - -/* - * iSCSI KWQ WQE - */ -union iscsi_kwqe { - struct iscsi_kwqe_init1 init1; - struct iscsi_kwqe_init2 init2; - struct iscsi_kwqe_conn_offload1 conn_offload1; - struct iscsi_kwqe_conn_offload2 conn_offload2; - struct iscsi_kwqe_conn_update conn_update; - struct iscsi_kwqe_conn_destroy conn_destroy; -}; - -/* - * iSCSI Login SQ WQE - */ -struct bnx2i_login_request { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 op_attr; -#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0) -#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0 -#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2) -#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2 -#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4) -#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4 -#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6) -#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6 -#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7) -#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7 - u8 version_max; - u8 version_min; -#elif defined(__LITTLE_ENDIAN) - u8 version_min; - u8 version_max; - u8 op_attr; -#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0) -#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0 -#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2) -#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2 -#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4) -#define 
ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4 -#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6) -#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6 -#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7) -#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7 - u8 op_code; -#endif - u32 data_length; - u32 isid_lo; -#if defined(__BIG_ENDIAN) - u16 isid_hi; - u16 tsih; -#elif defined(__LITTLE_ENDIAN) - u16 tsih; - u16 isid_hi; -#endif -#if defined(__BIG_ENDIAN) - u16 reserved2; - u16 itt; -#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0) -#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0 -#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14) -#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 itt; -#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0) -#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0 -#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14) -#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14 - u16 reserved2; -#endif -#if defined(__BIG_ENDIAN) - u16 cid; - u16 reserved3; -#elif defined(__LITTLE_ENDIAN) - u16 reserved3; - u16 cid; -#endif - u32 cmd_sn; - u32 exp_stat_sn; - u32 reserved4; - u32 resp_bd_list_addr_lo; - u32 resp_bd_list_addr_hi; - u32 resp_buffer; -#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) -#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 -#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24) -#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24 -#if defined(__BIG_ENDIAN) - u16 reserved8; - u8 reserved7; - u8 flags; -#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0) -#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0 -#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) -#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 -#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3) -#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3 -#elif defined(__LITTLE_ENDIAN) - u8 flags; -#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0) -#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0 -#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2) -#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2 -#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3) -#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3 - u8 reserved7; - u16 reserved8; -#endif - u32 bd_list_addr_lo; - u32 bd_list_addr_hi; -#if defined(__BIG_ENDIAN) - u8 cq_index; - u8 reserved10; - u8 reserved9; - u8 num_bds; -#elif defined(__LITTLE_ENDIAN) - u8 num_bds; - u8 reserved9; - u8 reserved10; - u8 cq_index; -#endif -}; - - -/* - * iSCSI Login CQE - */ -struct bnx2i_login_response { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 response_flags; -#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0) -#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0 -#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2) -#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2 -#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4) -#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4 -#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6) -#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6 -#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7) -#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7 - u8 version_max; - u8 version_active; -#elif defined(__LITTLE_ENDIAN) - u8 version_active; - u8 version_max; - u8 response_flags; -#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0) -#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0 -#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2) -#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2 -#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4) -#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4 -#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6) -#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6 -#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7) -#define 
ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7 - u8 op_code; -#endif - u32 data_length; - u32 exp_cmd_sn; - u32 max_cmd_sn; - u32 reserved1[2]; -#if defined(__BIG_ENDIAN) - u16 reserved3; - u8 err_code; - u8 reserved2; -#elif defined(__LITTLE_ENDIAN) - u8 reserved2; - u8 err_code; - u16 reserved3; -#endif - u32 stat_sn; - u32 isid_lo; -#if defined(__BIG_ENDIAN) - u16 isid_hi; - u16 tsih; -#elif defined(__LITTLE_ENDIAN) - u16 tsih; - u16 isid_hi; -#endif -#if defined(__BIG_ENDIAN) - u8 status_class; - u8 status_detail; - u16 reserved4; -#elif defined(__LITTLE_ENDIAN) - u16 reserved4; - u8 status_detail; - u8 status_class; -#endif - u32 reserved5[3]; -#if defined(__BIG_ENDIAN) - u16 reserved6; - u16 itt; -#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0) -#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0 -#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14) -#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 itt; -#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0) -#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0 -#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14) -#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14 - u16 reserved6; -#endif - u32 cq_req_sn; -}; - - -/* - * iSCSI Logout SQ WQE - */ -struct bnx2i_logout_request { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 op_attr; -#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0) -#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0 -#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7) -#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7 - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 op_attr; -#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0) -#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0 -#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7) -#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7 - u8 op_code; -#endif - u32 data_length; - u32 reserved1[2]; -#if defined(__BIG_ENDIAN) - u16 reserved2; - u16 itt; -#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0) -#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0 -#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14) -#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 itt; -#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0) -#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0 -#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14) -#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14 - u16 reserved2; -#endif -#if defined(__BIG_ENDIAN) - u16 cid; - u16 reserved3; -#elif defined(__LITTLE_ENDIAN) - u16 reserved3; - u16 cid; -#endif - u32 cmd_sn; - u32 reserved4[5]; - u32 zero_fill; - u32 bd_list_addr_lo; - u32 bd_list_addr_hi; -#if defined(__BIG_ENDIAN) - u8 cq_index; - u8 reserved6; - u8 reserved5; - u8 num_bds; -#elif defined(__LITTLE_ENDIAN) - u8 num_bds; - u8 reserved5; - u8 reserved6; - u8 cq_index; -#endif -}; - - -/* - * iSCSI Logout CQE - */ -struct bnx2i_logout_response { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 reserved1; - u8 response; - u8 reserved0; -#elif defined(__LITTLE_ENDIAN) - u8 reserved0; - u8 response; - u8 reserved1; - u8 op_code; -#endif - u32 reserved2; - u32 exp_cmd_sn; - u32 max_cmd_sn; - u32 reserved3[2]; -#if defined(__BIG_ENDIAN) - u16 reserved5; - u8 err_code; - u8 reserved4; -#elif defined(__LITTLE_ENDIAN) - u8 reserved4; - u8 err_code; - u16 reserved5; -#endif - u32 reserved6[3]; -#if defined(__BIG_ENDIAN) - u16 time_to_wait; - u16 time_to_retain; -#elif defined(__LITTLE_ENDIAN) - u16 time_to_retain; - u16 time_to_wait; -#endif - u32 reserved7[3]; -#if defined(__BIG_ENDIAN) - u16 reserved8; - u16 itt; -#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0) -#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0 -#define 
ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14) -#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 itt; -#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0) -#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0 -#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14) -#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14 - u16 reserved8; -#endif - u32 cq_req_sn; -}; - - -/* - * iSCSI Nop-In CQE - */ -struct bnx2i_nop_in_msg { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 reserved1; - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 reserved1; - u8 op_code; -#endif - u32 data_length; - u32 exp_cmd_sn; - u32 max_cmd_sn; - u32 ttt; - u32 reserved2; -#if defined(__BIG_ENDIAN) - u16 reserved4; - u8 err_code; - u8 reserved3; -#elif defined(__LITTLE_ENDIAN) - u8 reserved3; - u8 err_code; - u16 reserved4; -#endif - u32 reserved5; - u32 lun[2]; - u32 reserved6[4]; -#if defined(__BIG_ENDIAN) - u16 reserved7; - u16 itt; -#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0) -#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0 -#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14) -#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 itt; -#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0) -#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0 -#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14) -#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14 - u16 reserved7; -#endif - u32 cq_req_sn; -}; - - -/* - * iSCSI NOP-OUT SQ WQE - */ -struct bnx2i_nop_out_request { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 op_attr; -#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0) -#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0 -#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7) -#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7 - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 op_attr; -#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0) -#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0 -#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7) -#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7 - u8 op_code; -#endif - u32 data_length; - u32 lun[2]; -#if defined(__BIG_ENDIAN) - u16 reserved2; - u16 itt; -#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0) -#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0 -#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14) -#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 itt; -#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0) -#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0 -#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14) -#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14 - u16 reserved2; -#endif - u32 ttt; - u32 cmd_sn; - u32 reserved3[2]; - u32 resp_bd_list_addr_lo; - u32 resp_bd_list_addr_hi; - u32 resp_buffer; -#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) -#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 -#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24) -#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24 -#if defined(__BIG_ENDIAN) - u16 reserved7; - u8 reserved6; - u8 flags; -#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0) -#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0 -#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1) -#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1 -#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2) -#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2 -#elif defined(__LITTLE_ENDIAN) - u8 flags; -#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0) -#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0 -#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1) -#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1 -#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2) -#define 
ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2 - u8 reserved6; - u16 reserved7; -#endif - u32 bd_list_addr_lo; - u32 bd_list_addr_hi; -#if defined(__BIG_ENDIAN) - u8 cq_index; - u8 reserved9; - u8 reserved8; - u8 num_bds; -#elif defined(__LITTLE_ENDIAN) - u8 num_bds; - u8 reserved8; - u8 reserved9; - u8 cq_index; -#endif -}; - -/* - * iSCSI Reject CQE - */ -struct bnx2i_reject_msg { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 reserved1; - u8 reason; - u8 reserved0; -#elif defined(__LITTLE_ENDIAN) - u8 reserved0; - u8 reason; - u8 reserved1; - u8 op_code; -#endif - u32 data_length; - u32 exp_cmd_sn; - u32 max_cmd_sn; - u32 reserved2[2]; -#if defined(__BIG_ENDIAN) - u16 reserved4; - u8 err_code; - u8 reserved3; -#elif defined(__LITTLE_ENDIAN) - u8 reserved3; - u8 err_code; - u16 reserved4; -#endif - u32 reserved5[8]; - u32 cq_req_sn; -}; - -/* - * bnx2i iSCSI TMF SQ WQE - */ -struct bnx2i_tmf_request { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 op_attr; -#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0) -#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0 -#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7) -#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7 - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 op_attr; -#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0) -#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0 -#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7) -#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7 - u8 op_code; -#endif - u32 data_length; - u32 lun[2]; -#if defined(__BIG_ENDIAN) - u16 reserved1; - u16 itt; -#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0) -#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0 -#define ISCSI_TMF_REQUEST_TYPE (0x3<<14) -#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 itt; -#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0) -#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0 -#define ISCSI_TMF_REQUEST_TYPE (0x3<<14) -#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14 - u16 reserved1; -#endif - u32 ref_itt; - u32 cmd_sn; - u32 reserved2; - u32 ref_cmd_sn; - u32 reserved3[3]; - u32 zero_fill; - u32 bd_list_addr_lo; - u32 bd_list_addr_hi; -#if defined(__BIG_ENDIAN) - u8 cq_index; - u8 reserved5; - u8 reserved4; - u8 num_bds; -#elif defined(__LITTLE_ENDIAN) - u8 num_bds; - u8 reserved4; - u8 reserved5; - u8 cq_index; -#endif -}; - -/* - * iSCSI Text SQ WQE - */ -struct bnx2i_text_request { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 op_attr; -#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0) -#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0 -#define ISCSI_TEXT_REQUEST_CONT (0x1<<6) -#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6 -#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7) -#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7 - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 op_attr; -#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0) -#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0 -#define ISCSI_TEXT_REQUEST_CONT (0x1<<6) -#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6 -#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7) -#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7 - u8 op_code; -#endif - u32 data_length; - u32 lun[2]; -#if defined(__BIG_ENDIAN) - u16 reserved3; - u16 itt; -#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0) -#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0 -#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14) -#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 itt; -#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0) -#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0 -#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14) -#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14 - u16 reserved3; -#endif - u32 ttt; - u32 cmd_sn; - u32 
reserved4[2]; - u32 resp_bd_list_addr_lo; - u32 resp_bd_list_addr_hi; - u32 resp_buffer; -#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0) -#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0 -#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24) -#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24 - u32 zero_fill; - u32 bd_list_addr_lo; - u32 bd_list_addr_hi; -#if defined(__BIG_ENDIAN) - u8 cq_index; - u8 reserved7; - u8 reserved6; - u8 num_bds; -#elif defined(__LITTLE_ENDIAN) - u8 num_bds; - u8 reserved6; - u8 reserved7; - u8 cq_index; -#endif -}; - -/* - * iSCSI SQ WQE - */ -union iscsi_request { - struct bnx2i_cmd_request cmd; - struct bnx2i_tmf_request tmf; - struct bnx2i_nop_out_request nop_out; - struct bnx2i_login_request login_req; - struct bnx2i_text_request text; - struct bnx2i_logout_request logout_req; - struct bnx2i_cleanup_request cleanup; -}; - - -/* - * iSCSI TMF CQE - */ -struct bnx2i_tmf_response { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 reserved1; - u8 response; - u8 reserved0; -#elif defined(__LITTLE_ENDIAN) - u8 reserved0; - u8 response; - u8 reserved1; - u8 op_code; -#endif - u32 reserved2; - u32 exp_cmd_sn; - u32 max_cmd_sn; - u32 reserved3[2]; -#if defined(__BIG_ENDIAN) - u16 reserved5; - u8 err_code; - u8 reserved4; -#elif defined(__LITTLE_ENDIAN) - u8 reserved4; - u8 err_code; - u16 reserved5; -#endif - u32 reserved6[7]; -#if defined(__BIG_ENDIAN) - u16 reserved7; - u16 itt; -#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0) -#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0 -#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14) -#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 itt; -#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0) -#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0 -#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14) -#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14 - u16 reserved7; -#endif - u32 cq_req_sn; -}; - -/* - * iSCSI Text CQE - */ -struct bnx2i_text_response { -#if defined(__BIG_ENDIAN) - u8 op_code; - u8 response_flags; -#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0) -#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0 -#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6) -#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6 -#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7) -#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7 - u16 reserved0; -#elif defined(__LITTLE_ENDIAN) - u16 reserved0; - u8 response_flags; -#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0) -#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0 -#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6) -#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6 -#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7) -#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7 - u8 op_code; -#endif - u32 data_length; - u32 exp_cmd_sn; - u32 max_cmd_sn; - u32 ttt; - u32 reserved2; -#if defined(__BIG_ENDIAN) - u16 reserved4; - u8 err_code; - u8 reserved3; -#elif defined(__LITTLE_ENDIAN) - u8 reserved3; - u8 err_code; - u16 reserved4; -#endif - u32 reserved5; - u32 lun[2]; - u32 reserved6[4]; -#if defined(__BIG_ENDIAN) - u16 reserved7; - u16 itt; -#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0) -#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0 -#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14) -#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14 -#elif defined(__LITTLE_ENDIAN) - u16 itt; -#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0) -#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0 -#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14) -#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14 - u16 reserved7; -#endif - u32 cq_req_sn; -}; - -/* - * iSCSI CQE - */ -union iscsi_response { - struct bnx2i_cmd_response cmd; - struct bnx2i_tmf_response tmf; - 
struct bnx2i_login_response login_resp; - struct bnx2i_text_response text; - struct bnx2i_logout_response logout_resp; - struct bnx2i_cleanup_response cleanup; - struct bnx2i_reject_msg reject; - struct bnx2i_async_msg async; - struct bnx2i_nop_in_msg nop_in; -}; - -#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */ diff --git a/trunk/drivers/scsi/bnx2i/Kconfig b/trunk/drivers/scsi/bnx2i/Kconfig deleted file mode 100644 index 820d428ae839..000000000000 --- a/trunk/drivers/scsi/bnx2i/Kconfig +++ /dev/null @@ -1,7 +0,0 @@ -config SCSI_BNX2_ISCSI - tristate "Broadcom NetXtreme II iSCSI support" - select SCSI_ISCSI_ATTRS - select CNIC - ---help--- - This driver supports iSCSI offload for the Broadcom NetXtreme II - devices. diff --git a/trunk/drivers/scsi/bnx2i/Makefile b/trunk/drivers/scsi/bnx2i/Makefile deleted file mode 100644 index b5802bd2e76a..000000000000 --- a/trunk/drivers/scsi/bnx2i/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o - -obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o diff --git a/trunk/drivers/scsi/bnx2i/bnx2i.h b/trunk/drivers/scsi/bnx2i/bnx2i.h deleted file mode 100644 index d7576f28c6e9..000000000000 --- a/trunk/drivers/scsi/bnx2i/bnx2i.h +++ /dev/null @@ -1,771 +0,0 @@ -/* bnx2i.h: Broadcom NetXtreme II iSCSI driver. - * - * Copyright (c) 2006 - 2009 Broadcom Corporation - * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. - * Copyright (c) 2007, 2008 Mike Christie - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) - */ - -#ifndef _BNX2I_H_ -#define _BNX2I_H_ - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../../net/cnic_if.h" -#include "57xx_iscsi_hsi.h" -#include "57xx_iscsi_constants.h" - -#define BNX2_ISCSI_DRIVER_NAME "bnx2i" - -#define BNX2I_MAX_ADAPTERS 8 - -#define ISCSI_MAX_CONNS_PER_HBA 128 -#define ISCSI_MAX_SESS_PER_HBA ISCSI_MAX_CONNS_PER_HBA -#define ISCSI_MAX_CMDS_PER_SESS 128 - -/* Total active commands across all connections supported by devices */ -#define ISCSI_MAX_CMDS_PER_HBA_5708 (28 * (ISCSI_MAX_CMDS_PER_SESS - 1)) -#define ISCSI_MAX_CMDS_PER_HBA_5709 (128 * (ISCSI_MAX_CMDS_PER_SESS - 1)) -#define ISCSI_MAX_CMDS_PER_HBA_57710 (256 * (ISCSI_MAX_CMDS_PER_SESS - 1)) - -#define ISCSI_MAX_BDS_PER_CMD 32 - -#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8 -#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS 4 - -/* 5706/08 hardware has limit on maximum buffer size per BD it can handle */ -#define MAX_BD_LENGTH 65535 -#define BD_SPLIT_SIZE 32768 - -/* min, max & default values for SQ/RQ/CQ size, configurable via' modparam */ -#define BNX2I_SQ_WQES_MIN 16 -#define BNX2I_570X_SQ_WQES_MAX 128 -#define BNX2I_5770X_SQ_WQES_MAX 512 -#define BNX2I_570X_SQ_WQES_DEFAULT 128 -#define BNX2I_5770X_SQ_WQES_DEFAULT 256 - -#define BNX2I_570X_CQ_WQES_MAX 128 -#define BNX2I_5770X_CQ_WQES_MAX 512 - -#define BNX2I_RQ_WQES_MIN 16 -#define BNX2I_RQ_WQES_MAX 32 -#define BNX2I_RQ_WQES_DEFAULT 16 - -/* CCELLs per conn */ -#define BNX2I_CCELLS_MIN 16 -#define BNX2I_CCELLS_MAX 96 -#define BNX2I_CCELLS_DEFAULT 64 - -#define ITT_INVALID_SIGNATURE 0xFFFF - -#define ISCSI_CMD_CLEANUP_TIMEOUT 100 - -#define BNX2I_CONN_CTX_BUF_SIZE 16384 - -#define BNX2I_SQ_WQE_SIZE 64 -#define BNX2I_RQ_WQE_SIZE 256 
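Every request and response structure in the HSI header above packs its 16-bit itt field the same way: a 14-bit task-table index under the (0x3FFF<<0) mask and a 2-bit task type under (0x3<<14). A minimal userspace sketch of that pack/unpack convention follows; the task-type value 2 is assumed for illustration only (the real constants live in 57xx_iscsi_constants.h, which is not shown here).

    #include <stdint.h>
    #include <stdio.h>

    /* Masks and shifts exactly as in the HSI header above. */
    #define ITT_INDEX       (0x3FFFu << 0)   /* 14-bit task-table index */
    #define ITT_INDEX_SHIFT 0
    #define ITT_TYPE        (0x3u << 14)     /* 2-bit task type */
    #define ITT_TYPE_SHIFT  14

    static uint16_t itt_pack(uint16_t index, uint16_t type)
    {
        return (uint16_t)(((index << ITT_INDEX_SHIFT) & ITT_INDEX) |
                          ((type << ITT_TYPE_SHIFT) & ITT_TYPE));
    }

    int main(void)
    {
        /* type value 2 stands in for ISCSI_TASK_TYPE_MPATH (assumed) */
        uint16_t itt = itt_pack(0x012a, 2);

        printf("itt=0x%04x index=0x%04x type=%u\n", (unsigned)itt,
               (itt & ITT_INDEX) >> ITT_INDEX_SHIFT,
               (itt & ITT_TYPE) >> ITT_TYPE_SHIFT);
        return 0;
    }

The same index/type split is what the WQE-building routines later in bnx2i_hwi.c rely on when they OR ISCSI_TASK_TYPE_MPATH into task->itt.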
-#define BNX2I_CQE_SIZE 64 - -#define MB_KERNEL_CTX_SHIFT 8 -#define MB_KERNEL_CTX_SIZE (1 << MB_KERNEL_CTX_SHIFT) - -#define CTX_SHIFT 7 -#define GET_CID_NUM(cid_addr) ((cid_addr) >> CTX_SHIFT) - -#define CTX_OFFSET 0x10000 -#define MAX_CID_CNT 0x4000 - -/* 5709 context registers */ -#define BNX2_MQ_CONFIG2 0x00003d00 -#define BNX2_MQ_CONFIG2_CONT_SZ (0x7L<<4) -#define BNX2_MQ_CONFIG2_FIRST_L4L5 (0x1fL<<8) - -/* 57710's BAR2 is mapped to doorbell registers */ -#define BNX2X_DOORBELL_PCI_BAR 2 -#define BNX2X_MAX_CQS 8 - -#define CNIC_ARM_CQE 1 -#define CNIC_DISARM_CQE 0 - -#define REG_RD(__hba, offset) \ - readl(__hba->regview + offset) -#define REG_WR(__hba, offset, val) \ - writel(val, __hba->regview + offset) - - -/** - * struct generic_pdu_resc - login pdu resource structure - * - * @req_buf: driver buffer used to stage payload associated with - * the login request - * @req_dma_addr: dma address for iscsi login request payload buffer - * @req_buf_size: actual login request payload length - * @req_wr_ptr: pointer into login request buffer when next data is - * to be written - * @resp_hdr: iscsi header where iscsi login response header is to - * be recreated - * @resp_buf: buffer to stage login response payload - * @resp_dma_addr: login response payload buffer dma address - * @resp_buf_size: login response paylod length - * @resp_wr_ptr: pointer into login response buffer when next data is - * to be written - * @req_bd_tbl: iscsi login request payload BD table - * @req_bd_dma: login request BD table dma address - * @resp_bd_tbl: iscsi login response payload BD table - * @resp_bd_dma: login request BD table dma address - * - * following structure defines buffer info for generic pdus such as iSCSI Login, - * Logout and NOP - */ -struct generic_pdu_resc { - char *req_buf; - dma_addr_t req_dma_addr; - u32 req_buf_size; - char *req_wr_ptr; - struct iscsi_hdr resp_hdr; - char *resp_buf; - dma_addr_t resp_dma_addr; - u32 resp_buf_size; - char *resp_wr_ptr; - char *req_bd_tbl; - dma_addr_t req_bd_dma; - char *resp_bd_tbl; - dma_addr_t resp_bd_dma; -}; - - -/** - * struct bd_resc_page - tracks DMA'able memory allocated for BD tables - * - * @link: list head to link elements - * @max_ptrs: maximun pointers that can be stored in this page - * @num_valid: number of pointer valid in this page - * @page: base addess for page pointer array - * - * structure to track DMA'able memory allocated for command BD tables - */ -struct bd_resc_page { - struct list_head link; - u32 max_ptrs; - u32 num_valid; - void *page[1]; -}; - - -/** - * struct io_bdt - I/O buffer destricptor table - * - * @bd_tbl: BD table's virtual address - * @bd_tbl_dma: BD table's dma address - * @bd_valid: num valid BD entries - * - * IO BD table - */ -struct io_bdt { - struct iscsi_bd *bd_tbl; - dma_addr_t bd_tbl_dma; - u16 bd_valid; -}; - - -/** - * bnx2i_cmd - iscsi command structure - * - * @scsi_cmd: SCSI-ML task pointer corresponding to this iscsi cmd - * @sg: SG list - * @io_tbl: buffer descriptor (BD) table - * @bd_tbl_dma: buffer descriptor (BD) table's dma address - */ -struct bnx2i_cmd { - struct iscsi_hdr hdr; - struct bnx2i_conn *conn; - struct scsi_cmnd *scsi_cmd; - struct scatterlist *sg; - struct io_bdt io_tbl; - dma_addr_t bd_tbl_dma; - struct bnx2i_cmd_request req; -}; - - -/** - * struct bnx2i_conn - iscsi connection structure - * - * @cls_conn: pointer to iscsi cls conn - * @hba: adapter structure pointer - * @iscsi_conn_cid: iscsi conn id - * @fw_cid: firmware iscsi context id - * @ep: endpoint structure pointer - * 
@gen_pdu: login/nopout/logout pdu resources - * @violation_notified: bit mask used to track iscsi error/warning messages - * already printed out - * - * iSCSI connection structure - */ -struct bnx2i_conn { - struct iscsi_cls_conn *cls_conn; - struct bnx2i_hba *hba; - struct completion cmd_cleanup_cmpl; - int is_bound; - - u32 iscsi_conn_cid; -#define BNX2I_CID_RESERVED 0x5AFF - u32 fw_cid; - - struct timer_list poll_timer; - /* - * Queue Pair (QP) related structure elements. - */ - struct bnx2i_endpoint *ep; - - /* - * Buffer for login negotiation process - */ - struct generic_pdu_resc gen_pdu; - u64 violation_notified; -}; - - - -/** - * struct iscsi_cid_queue - Per adapter iscsi cid queue - * - * @cid_que_base: queue base memory - * @cid_que: queue memory pointer - * @cid_q_prod_idx: produce index - * @cid_q_cons_idx: consumer index - * @cid_q_max_idx: max index. used to detect wrap around condition - * @cid_free_cnt: queue size - * @conn_cid_tbl: iscsi cid to conn structure mapping table - * - * Per adapter iSCSI CID Queue - */ -struct iscsi_cid_queue { - void *cid_que_base; - u32 *cid_que; - u32 cid_q_prod_idx; - u32 cid_q_cons_idx; - u32 cid_q_max_idx; - u32 cid_free_cnt; - struct bnx2i_conn **conn_cid_tbl; -}; - -/** - * struct bnx2i_hba - bnx2i adapter structure - * - * @link: list head to link elements - * @cnic: pointer to cnic device - * @pcidev: pointer to pci dev - * @netdev: pointer to netdev structure - * @regview: mapped PCI register space - * @age: age, incremented by every recovery - * @cnic_dev_type: cnic device type, 5706/5708/5709/57710 - * @mail_queue_access: mailbox queue access mode, applicable to 5709 only - * @reg_with_cnic: indicates whether the device is register with CNIC - * @adapter_state: adapter state, UP, GOING_DOWN, LINK_DOWN - * @mtu_supported: Ethernet MTU supported - * @shost: scsi host pointer - * @max_sqes: SQ size - * @max_rqes: RQ size - * @max_cqes: CQ size - * @num_ccell: number of command cells per connection - * @ofld_conns_active: active connection list - * @max_active_conns: max offload connections supported by this device - * @cid_que: iscsi cid queue - * @ep_rdwr_lock: read / write lock to synchronize various ep lists - * @ep_ofld_list: connection list for pending offload completion - * @ep_destroy_list: connection list for pending offload completion - * @mp_bd_tbl: BD table to be used with middle path requests - * @mp_bd_dma: DMA address of 'mp_bd_tbl' memory buffer - * @dummy_buffer: Dummy buffer to be used with zero length scsicmd reqs - * @dummy_buf_dma: DMA address of 'dummy_buffer' memory buffer - * @lock: lock to synchonize access to hba structure - * @pci_did: PCI device ID - * @pci_vid: PCI vendor ID - * @pci_sdid: PCI subsystem device ID - * @pci_svid: PCI subsystem vendor ID - * @pci_func: PCI function number in system pci tree - * @pci_devno: PCI device number in system pci tree - * @num_wqe_sent: statistic counter, total wqe's sent - * @num_cqe_rcvd: statistic counter, total cqe's received - * @num_intr_claimed: statistic counter, total interrupts claimed - * @link_changed_count: statistic counter, num of link change notifications - * received - * @ipaddr_changed_count: statistic counter, num times IP address changed while - * at least one connection is offloaded - * @num_sess_opened: statistic counter, total num sessions opened - * @num_conn_opened: statistic counter, total num conns opened on this hba - * @ctx_ccell_tasks: captures number of ccells and tasks supported by - * currently offloaded connection, used to decode - * 
context memory - * - * Adapter Data Structure - */ -struct bnx2i_hba { - struct list_head link; - struct cnic_dev *cnic; - struct pci_dev *pcidev; - struct net_device *netdev; - void __iomem *regview; - - u32 age; - unsigned long cnic_dev_type; - #define BNX2I_NX2_DEV_5706 0x0 - #define BNX2I_NX2_DEV_5708 0x1 - #define BNX2I_NX2_DEV_5709 0x2 - #define BNX2I_NX2_DEV_57710 0x3 - u32 mail_queue_access; - #define BNX2I_MQ_KERNEL_MODE 0x0 - #define BNX2I_MQ_KERNEL_BYPASS_MODE 0x1 - #define BNX2I_MQ_BIN_MODE 0x2 - unsigned long reg_with_cnic; - #define BNX2I_CNIC_REGISTERED 1 - - unsigned long adapter_state; - #define ADAPTER_STATE_UP 0 - #define ADAPTER_STATE_GOING_DOWN 1 - #define ADAPTER_STATE_LINK_DOWN 2 - #define ADAPTER_STATE_INIT_FAILED 31 - unsigned int mtu_supported; - #define BNX2I_MAX_MTU_SUPPORTED 1500 - - struct Scsi_Host *shost; - - u32 max_sqes; - u32 max_rqes; - u32 max_cqes; - u32 num_ccell; - - int ofld_conns_active; - - int max_active_conns; - struct iscsi_cid_queue cid_que; - - rwlock_t ep_rdwr_lock; - struct list_head ep_ofld_list; - struct list_head ep_destroy_list; - - /* - * BD table to be used with MP (Middle Path requests. - */ - char *mp_bd_tbl; - dma_addr_t mp_bd_dma; - char *dummy_buffer; - dma_addr_t dummy_buf_dma; - - spinlock_t lock; /* protects hba structure access */ - struct mutex net_dev_lock;/* sync net device access */ - - /* - * PCI related info. - */ - u16 pci_did; - u16 pci_vid; - u16 pci_sdid; - u16 pci_svid; - u16 pci_func; - u16 pci_devno; - - /* - * Following are a bunch of statistics useful during development - * and later stage for score boarding. - */ - u32 num_wqe_sent; - u32 num_cqe_rcvd; - u32 num_intr_claimed; - u32 link_changed_count; - u32 ipaddr_changed_count; - u32 num_sess_opened; - u32 num_conn_opened; - unsigned int ctx_ccell_tasks; -}; - - -/******************************************************************************* - * QP [ SQ / RQ / CQ ] info. 
- ******************************************************************************/ - -/* - * SQ/RQ/CQ generic structure definition - */ -struct sqe { - u8 sqe_byte[BNX2I_SQ_WQE_SIZE]; -}; - -struct rqe { - u8 rqe_byte[BNX2I_RQ_WQE_SIZE]; -}; - -struct cqe { - u8 cqe_byte[BNX2I_CQE_SIZE]; -}; - - -enum { -#if defined(__LITTLE_ENDIAN) - CNIC_EVENT_COAL_INDEX = 0x0, - CNIC_SEND_DOORBELL = 0x4, - CNIC_EVENT_CQ_ARM = 0x7, - CNIC_RECV_DOORBELL = 0x8 -#elif defined(__BIG_ENDIAN) - CNIC_EVENT_COAL_INDEX = 0x2, - CNIC_SEND_DOORBELL = 0x6, - CNIC_EVENT_CQ_ARM = 0x4, - CNIC_RECV_DOORBELL = 0xa -#endif -}; - - -/* - * CQ DB - */ -struct bnx2x_iscsi_cq_pend_cmpl { - /* CQ producer, updated by Ustorm */ - u16 ustrom_prod; - /* CQ pending completion counter */ - u16 pend_cntr; -}; - - -struct bnx2i_5771x_cq_db { - struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS]; - /* CQ pending completion ITT array */ - u16 itt[BNX2X_MAX_CQS]; - /* Cstorm CQ sequence to notify array, updated by driver */; - u16 sqn[BNX2X_MAX_CQS]; - u32 reserved[4] /* 16 byte allignment */; -}; - - -struct bnx2i_5771x_sq_rq_db { - u16 prod_idx; - u8 reserved0[14]; /* Pad structure size to 16 bytes */ -}; - - -struct bnx2i_5771x_dbell_hdr { - u8 header; - /* 1 for rx doorbell, 0 for tx doorbell */ -#define B577XX_DOORBELL_HDR_RX (0x1<<0) -#define B577XX_DOORBELL_HDR_RX_SHIFT 0 - /* 0 for normal doorbell, 1 for advertise wnd doorbell */ -#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1) -#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1 - /* rdma tx only: DPM transaction size specifier (64/128/256/512B) */ -#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2) -#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2 - /* connection type */ -#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4) -#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4 -}; - -struct bnx2i_5771x_dbell { - struct bnx2i_5771x_dbell_hdr dbell; - u8 pad[3]; - -}; - -/** - * struct qp_info - QP (share queue region) atrributes structure - * - * @ctx_base: ioremapped pci register base to access doorbell register - * pertaining to this offloaded connection - * @sq_virt: virtual address of send queue (SQ) region - * @sq_phys: DMA address of SQ memory region - * @sq_mem_size: SQ size - * @sq_prod_qe: SQ producer entry pointer - * @sq_cons_qe: SQ consumer entry pointer - * @sq_first_qe: virtaul address of first entry in SQ - * @sq_last_qe: virtaul address of last entry in SQ - * @sq_prod_idx: SQ producer index - * @sq_cons_idx: SQ consumer index - * @sqe_left: number sq entry left - * @sq_pgtbl_virt: page table describing buffer consituting SQ region - * @sq_pgtbl_phys: dma address of 'sq_pgtbl_virt' - * @sq_pgtbl_size: SQ page table size - * @cq_virt: virtual address of completion queue (CQ) region - * @cq_phys: DMA address of RQ memory region - * @cq_mem_size: CQ size - * @cq_prod_qe: CQ producer entry pointer - * @cq_cons_qe: CQ consumer entry pointer - * @cq_first_qe: virtaul address of first entry in CQ - * @cq_last_qe: virtaul address of last entry in CQ - * @cq_prod_idx: CQ producer index - * @cq_cons_idx: CQ consumer index - * @cqe_left: number cq entry left - * @cqe_size: size of each CQ entry - * @cqe_exp_seq_sn: next expected CQE sequence number - * @cq_pgtbl_virt: page table describing buffer consituting CQ region - * @cq_pgtbl_phys: dma address of 'cq_pgtbl_virt' - * @cq_pgtbl_size: CQ page table size - * @rq_virt: virtual address of receive queue (RQ) region - * @rq_phys: DMA address of RQ memory region - * @rq_mem_size: RQ size - * @rq_prod_qe: RQ producer entry pointer - * @rq_cons_qe: RQ 
consumer entry pointer - * @rq_first_qe: virtaul address of first entry in RQ - * @rq_last_qe: virtaul address of last entry in RQ - * @rq_prod_idx: RQ producer index - * @rq_cons_idx: RQ consumer index - * @rqe_left: number rq entry left - * @rq_pgtbl_virt: page table describing buffer consituting RQ region - * @rq_pgtbl_phys: dma address of 'rq_pgtbl_virt' - * @rq_pgtbl_size: RQ page table size - * - * queue pair (QP) is a per connection shared data structure which is used - * to send work requests (SQ), receive completion notifications (CQ) - * and receive asynchoronous / scsi sense info (RQ). 'qp_info' structure - * below holds queue memory, consumer/producer indexes and page table - * information - */ -struct qp_info { - void __iomem *ctx_base; -#define DPM_TRIGER_TYPE 0x40 - -#define BNX2I_570x_QUE_DB_SIZE 0 -#define BNX2I_5771x_QUE_DB_SIZE 16 - struct sqe *sq_virt; - dma_addr_t sq_phys; - u32 sq_mem_size; - - struct sqe *sq_prod_qe; - struct sqe *sq_cons_qe; - struct sqe *sq_first_qe; - struct sqe *sq_last_qe; - u16 sq_prod_idx; - u16 sq_cons_idx; - u32 sqe_left; - - void *sq_pgtbl_virt; - dma_addr_t sq_pgtbl_phys; - u32 sq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */ - - struct cqe *cq_virt; - dma_addr_t cq_phys; - u32 cq_mem_size; - - struct cqe *cq_prod_qe; - struct cqe *cq_cons_qe; - struct cqe *cq_first_qe; - struct cqe *cq_last_qe; - u16 cq_prod_idx; - u16 cq_cons_idx; - u32 cqe_left; - u32 cqe_size; - u32 cqe_exp_seq_sn; - - void *cq_pgtbl_virt; - dma_addr_t cq_pgtbl_phys; - u32 cq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */ - - struct rqe *rq_virt; - dma_addr_t rq_phys; - u32 rq_mem_size; - - struct rqe *rq_prod_qe; - struct rqe *rq_cons_qe; - struct rqe *rq_first_qe; - struct rqe *rq_last_qe; - u16 rq_prod_idx; - u16 rq_cons_idx; - u32 rqe_left; - - void *rq_pgtbl_virt; - dma_addr_t rq_pgtbl_phys; - u32 rq_pgtbl_size; /* set to PAGE_SIZE for 5708 & 5709 */ -}; - - - -/* - * CID handles - */ -struct ep_handles { - u32 fw_cid; - u32 drv_iscsi_cid; - u16 pg_cid; - u16 rsvd; -}; - - -enum { - EP_STATE_IDLE = 0x0, - EP_STATE_PG_OFLD_START = 0x1, - EP_STATE_PG_OFLD_COMPL = 0x2, - EP_STATE_OFLD_START = 0x4, - EP_STATE_OFLD_COMPL = 0x8, - EP_STATE_CONNECT_START = 0x10, - EP_STATE_CONNECT_COMPL = 0x20, - EP_STATE_ULP_UPDATE_START = 0x40, - EP_STATE_ULP_UPDATE_COMPL = 0x80, - EP_STATE_DISCONN_START = 0x100, - EP_STATE_DISCONN_COMPL = 0x200, - EP_STATE_CLEANUP_START = 0x400, - EP_STATE_CLEANUP_CMPL = 0x800, - EP_STATE_TCP_FIN_RCVD = 0x1000, - EP_STATE_TCP_RST_RCVD = 0x2000, - EP_STATE_PG_OFLD_FAILED = 0x1000000, - EP_STATE_ULP_UPDATE_FAILED = 0x2000000, - EP_STATE_CLEANUP_FAILED = 0x4000000, - EP_STATE_OFLD_FAILED = 0x8000000, - EP_STATE_CONNECT_FAILED = 0x10000000, - EP_STATE_DISCONN_TIMEDOUT = 0x20000000, -}; - -/** - * struct bnx2i_endpoint - representation of tcp connection in NX2 world - * - * @link: list head to link elements - * @hba: adapter to which this connection belongs - * @conn: iscsi connection this EP is linked to - * @sess: iscsi session this EP is linked to - * @cm_sk: cnic sock struct - * @hba_age: age to detect if 'iscsid' issues ep_disconnect() - * after HBA reset is completed by bnx2i/cnic/bnx2 - * modules - * @state: tracks offload connection state machine - * @teardown_mode: indicates if conn teardown is abortive or orderly - * @qp: QP information - * @ids: contains chip allocated *context id* & driver assigned - * *iscsi cid* - * @ofld_timer: offload timer to detect timeout - * @ofld_wait: wait queue - * - * Endpoint Structure - equivalent of tcp 
socket structure - */ -struct bnx2i_endpoint { - struct list_head link; - struct bnx2i_hba *hba; - struct bnx2i_conn *conn; - struct cnic_sock *cm_sk; - u32 hba_age; - u32 state; - unsigned long timestamp; - int num_active_cmds; - - struct qp_info qp; - struct ep_handles ids; - #define ep_iscsi_cid ids.drv_iscsi_cid - #define ep_cid ids.fw_cid - #define ep_pg_cid ids.pg_cid - struct timer_list ofld_timer; - wait_queue_head_t ofld_wait; -}; - - - -/* Global variables */ -extern unsigned int error_mask1, error_mask2; -extern u64 iscsi_error_mask; -extern unsigned int en_tcp_dack; -extern unsigned int event_coal_div; - -extern struct scsi_transport_template *bnx2i_scsi_xport_template; -extern struct iscsi_transport bnx2i_iscsi_transport; -extern struct cnic_ulp_ops bnx2i_cnic_cb; - -extern unsigned int sq_size; -extern unsigned int rq_size; - -extern struct device_attribute *bnx2i_dev_attributes[]; - - - -/* - * Function Prototypes - */ -extern void bnx2i_identify_device(struct bnx2i_hba *hba); -extern void bnx2i_register_device(struct bnx2i_hba *hba); - -extern void bnx2i_ulp_init(struct cnic_dev *dev); -extern void bnx2i_ulp_exit(struct cnic_dev *dev); -extern void bnx2i_start(void *handle); -extern void bnx2i_stop(void *handle); -extern void bnx2i_reg_dev_all(void); -extern void bnx2i_unreg_dev_all(void); -extern struct bnx2i_hba *get_adapter_list_head(void); - -struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, - u16 iscsi_cid); - -int bnx2i_alloc_ep_pool(void); -void bnx2i_release_ep_pool(void); -struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba); -struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba); - -struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic); - -struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic); -void bnx2i_free_hba(struct bnx2i_hba *hba); - -void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len); -void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count); - -void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd); - -void bnx2i_drop_session(struct iscsi_cls_session *session); - -extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba); -extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn, - struct iscsi_task *mtask); -extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn, - struct iscsi_task *mtask); -extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn, - struct bnx2i_cmd *cmnd); -extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn, - struct iscsi_task *mtask, u32 ttt, - char *datap, int data_len, int unsol); -extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn, - struct iscsi_task *mtask); -extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, - struct bnx2i_cmd *cmd); -extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, - struct bnx2i_endpoint *ep); -extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn); -extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, - struct bnx2i_endpoint *ep); - -extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, - struct bnx2i_endpoint *ep); -extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba, - struct bnx2i_endpoint *ep); -extern void bnx2i_ep_ofld_timer(unsigned long data); -extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list( - struct bnx2i_hba *hba, u32 iscsi_cid); -extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list( - struct bnx2i_hba *hba, u32 iscsi_cid); - -extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep); -extern void bnx2i_arm_cq_event_coalescing(struct 
bnx2i_endpoint *ep, u8 action); - -/* Debug related function prototypes */ -extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn); -extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn); -extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn); -extern void bnx2i_print_recv_state(struct bnx2i_conn *conn); - -#endif diff --git a/trunk/drivers/scsi/bnx2i/bnx2i_hwi.c b/trunk/drivers/scsi/bnx2i/bnx2i_hwi.c deleted file mode 100644 index 906cef5cda86..000000000000 --- a/trunk/drivers/scsi/bnx2i/bnx2i_hwi.c +++ /dev/null @@ -1,2405 +0,0 @@ -/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver. - * - * Copyright (c) 2006 - 2009 Broadcom Corporation - * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. - * Copyright (c) 2007, 2008 Mike Christie - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. - * - * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) - */ - -#include -#include -#include "bnx2i.h" - -/** - * bnx2i_get_cid_num - get cid from ep - * @ep: endpoint pointer - * - * Only applicable to 57710 family of devices - */ -static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep) -{ - u32 cid; - - if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) - cid = ep->ep_cid; - else - cid = GET_CID_NUM(ep->ep_cid); - return cid; -} - - -/** - * bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ size for 57710 device type - * @hba: Adapter for which adjustments is to be made - * - * Only applicable to 57710 family of devices - */ -static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) -{ - u32 num_elements_per_pg; - - if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) || - test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) || - test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { - if (!is_power_of_2(hba->max_sqes)) - hba->max_sqes = rounddown_pow_of_two(hba->max_sqes); - - if (!is_power_of_2(hba->max_rqes)) - hba->max_rqes = rounddown_pow_of_two(hba->max_rqes); - } - - /* Adjust each queue size if the user selection does not - * yield integral num of page buffers - */ - /* adjust SQ */ - num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; - if (hba->max_sqes < num_elements_per_pg) - hba->max_sqes = num_elements_per_pg; - else if (hba->max_sqes % num_elements_per_pg) - hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) & - ~(num_elements_per_pg - 1); - - /* adjust CQ */ - num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE; - if (hba->max_cqes < num_elements_per_pg) - hba->max_cqes = num_elements_per_pg; - else if (hba->max_cqes % num_elements_per_pg) - hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) & - ~(num_elements_per_pg - 1); - - /* adjust RQ */ - num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE; - if (hba->max_rqes < num_elements_per_pg) - hba->max_rqes = num_elements_per_pg; - else if (hba->max_rqes % num_elements_per_pg) - hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) & - ~(num_elements_per_pg - 1); -} - - -/** - * bnx2i_get_link_state - get network interface link state - * @hba: adapter instance pointer - * - * updates adapter structure flag based on netdev state - */ -static void bnx2i_get_link_state(struct bnx2i_hba *hba) -{ - if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state)) - set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); - else - clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); -} - - -/** - * bnx2i_iscsi_license_error - displays iscsi license related error message - 
* @hba: adapter instance pointer - * @error_code: error classification - * - * Puts out an error log when driver is unable to offload iscsi connection - * due to license restrictions - */ -static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code) -{ - if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED) - /* iSCSI offload not supported on this device */ - printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n", - hba->netdev->name); - if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED) - /* iSCSI offload not supported on this LOM device */ - printk(KERN_ERR "bnx2i: LOM is not enable to " - "offload iSCSI connections, dev=%s\n", - hba->netdev->name); - set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state); -} - - -/** - * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification - * @ep: endpoint (transport indentifier) structure - * @action: action, ARM or DISARM. For now only ARM_CQE is used - * - * Arm'ing CQ will enable chip to generate global EQ events inorder to interrupt - * the driver. EQ event is generated CQ index is hit or at least 1 CQ is - * outstanding and on chip timer expires - */ -void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action) -{ - struct bnx2i_5771x_cq_db *cq_db; - u16 cq_index; - - if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) - return; - - if (action == CNIC_ARM_CQE) { - cq_index = ep->qp.cqe_exp_seq_sn + - ep->num_active_cmds / event_coal_div; - cq_index %= (ep->qp.cqe_size * 2 + 1); - if (!cq_index) { - cq_index = 1; - cq_db = (struct bnx2i_5771x_cq_db *) - ep->qp.cq_pgtbl_virt; - cq_db->sqn[0] = cq_index; - } - } -} - - -/** - * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer - * @conn: iscsi connection on which RQ event occured - * @ptr: driver buffer to which RQ buffer contents is to - * be copied - * @len: length of valid data inside RQ buf - * - * Copies RQ buffer contents from shared (DMA'able) memory region to - * driver buffer. 
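bnx2i_adjust_qp_size() above first rounds the 570x queue depths down to a power of two, then rounds every queue up to a whole number of pages with the (x + n - 1) & ~(n - 1) idiom, which is only valid because num_elements_per_pg is itself a power of two. A standalone sketch of the page-rounding step, assuming a 4 KiB PAGE_SIZE:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE        4096u   /* assumed for illustration */
    #define BNX2I_CQE_SIZE   64u     /* from the header above */

    /* Round 'qes' up to a whole number of CQE pages; per_pg must be a
     * power of two for the mask trick to be valid. */
    static uint32_t round_queue(uint32_t qes)
    {
        uint32_t per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;   /* 64 per page */

        if (qes < per_pg)
            return per_pg;
        return (qes + per_pg - 1) & ~(per_pg - 1);
    }

    int main(void)
    {
        printf("%u -> %u\n", 100u, (unsigned)round_queue(100)); /* 128 */
        printf("%u -> %u\n", 128u, (unsigned)round_queue(128)); /* 128 */
        return 0;
    }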
RQ is used to DMA unsolicitated iscsi pdu's and - * scsi sense info - */ -void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len) -{ - if (!bnx2i_conn->ep->qp.rqe_left) - return; - - bnx2i_conn->ep->qp.rqe_left--; - memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len); - if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) { - bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe; - bnx2i_conn->ep->qp.rq_cons_idx = 0; - } else { - bnx2i_conn->ep->qp.rq_cons_qe++; - bnx2i_conn->ep->qp.rq_cons_idx++; - } -} - - -static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn) -{ - struct bnx2i_5771x_dbell dbell; - u32 msg; - - memset(&dbell, 0, sizeof(dbell)); - dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE << - B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT); - msg = *((u32 *)&dbell); - /* TODO : get doorbell register mapping */ - writel(cpu_to_le32(msg), conn->ep->qp.ctx_base); -} - - -/** - * bnx2i_put_rq_buf - Replenish RQ buffer, if required ring on chip doorbell - * @conn: iscsi connection on which event to post - * @count: number of RQ buffer being posted to chip - * - * No need to ring hardware doorbell for 57710 family of devices - */ -void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count) -{ - struct bnx2i_5771x_sq_rq_db *rq_db; - u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000); - struct bnx2i_endpoint *ep = bnx2i_conn->ep; - - ep->qp.rqe_left += count; - ep->qp.rq_prod_idx &= 0x7FFF; - ep->qp.rq_prod_idx += count; - - if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) { - ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes; - if (!hi_bit) - ep->qp.rq_prod_idx |= 0x8000; - } else - ep->qp.rq_prod_idx |= hi_bit; - - if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { - rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt; - rq_db->prod_idx = ep->qp.rq_prod_idx; - /* no need to ring hardware doorbell for 57710 */ - } else { - writew(ep->qp.rq_prod_idx, - ep->qp.ctx_base + CNIC_RECV_DOORBELL); - } - mmiowb(); -} - - -/** - * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine - * @conn: iscsi connection to which new SQ entries belong - * @count: number of SQ WQEs to post - * - * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family - * of devices. 
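bnx2i_put_rq_buf() above keeps rq_prod_idx as a 15-bit counter plus a phase bit in bit 15 that toggles on every wrap past max_rqes, so the index written to the doorbell carries wrap information as well as position. A minimal sketch of that update rule, with a queue depth of 16 assumed for the demo:

    #include <stdio.h>
    #include <stdint.h>

    /* Mirror of the rq_prod_idx update in bnx2i_put_rq_buf(); the phase
     * bit (0x8000) flips each time the 15-bit index wraps past max_rqes. */
    static uint16_t rq_advance(uint16_t prod_idx, uint16_t count,
                               uint16_t max_rqes)
    {
        uint16_t hi_bit = prod_idx & 0x8000;

        prod_idx &= 0x7FFF;
        prod_idx += count;
        if (prod_idx > max_rqes) {
            prod_idx %= max_rqes;
            hi_bit ^= 0x8000;       /* wrapped: flip the phase bit */
        }
        return prod_idx | hi_bit;
    }

    int main(void)
    {
        uint16_t idx = 14;                    /* phase 0, near the end */

        idx = rq_advance(idx, 4, 16);         /* wraps: 18 % 16 = 2 */
        printf("prod_idx=0x%04x\n", (unsigned)idx);   /* 0x8002 */
        return 0;
    }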
For 5706/5708/5709 new SQ WQE count is written into the - * doorbell register - */ -static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count) -{ - struct bnx2i_5771x_sq_rq_db *sq_db; - struct bnx2i_endpoint *ep = bnx2i_conn->ep; - - ep->num_active_cmds++; - wmb(); /* flush SQ WQE memory before the doorbell is rung */ - if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { - sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt; - sq_db->prod_idx = ep->qp.sq_prod_idx; - bnx2i_ring_577xx_doorbell(bnx2i_conn); - } else - writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL); - - mmiowb(); /* flush posted PCI writes */ -} - - -/** - * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters - * @conn: iscsi connection to which new SQ entries belong - * @count: number of SQ WQEs to post - * - * this routine will update SQ driver parameters and ring the doorbell - */ -static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn, - int count) -{ - int tmp_cnt; - - if (count == 1) { - if (bnx2i_conn->ep->qp.sq_prod_qe == - bnx2i_conn->ep->qp.sq_last_qe) - bnx2i_conn->ep->qp.sq_prod_qe = - bnx2i_conn->ep->qp.sq_first_qe; - else - bnx2i_conn->ep->qp.sq_prod_qe++; - } else { - if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <= - bnx2i_conn->ep->qp.sq_last_qe) - bnx2i_conn->ep->qp.sq_prod_qe += count; - else { - tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe - - bnx2i_conn->ep->qp.sq_prod_qe; - bnx2i_conn->ep->qp.sq_prod_qe = - &bnx2i_conn->ep->qp.sq_first_qe[count - - (tmp_cnt + 1)]; - } - } - bnx2i_conn->ep->qp.sq_prod_idx += count; - /* Ring the doorbell */ - bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx); -} - - -/** - * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware - * @conn: iscsi connection - * @cmd: driver command structure which is requesting - * a WQE to sent to chip for further processing - * - * prepare and post an iSCSI Login request WQE to CNIC firmware - */ -int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn, - struct iscsi_task *task) -{ - struct bnx2i_cmd *bnx2i_cmd; - struct bnx2i_login_request *login_wqe; - struct iscsi_login *login_hdr; - u32 dword; - - bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; - login_hdr = (struct iscsi_login *)task->hdr; - login_wqe = (struct bnx2i_login_request *) - bnx2i_conn->ep->qp.sq_prod_qe; - - login_wqe->op_code = login_hdr->opcode; - login_wqe->op_attr = login_hdr->flags; - login_wqe->version_max = login_hdr->max_version; - login_wqe->version_min = login_hdr->min_version; - login_wqe->data_length = ntoh24(login_hdr->dlength); - login_wqe->isid_lo = *((u32 *) login_hdr->isid); - login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2); - login_wqe->tsih = login_hdr->tsih; - login_wqe->itt = task->itt | - (ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT); - login_wqe->cid = login_hdr->cid; - - login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn); - login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn); - - login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma; - login_wqe->resp_bd_list_addr_hi = - (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32); - - dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) | - (bnx2i_conn->gen_pdu.resp_buf_size << - ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT)); - login_wqe->resp_buffer = dword; - login_wqe->flags = 0; - login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma; - login_wqe->bd_list_addr_hi = - (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32); - login_wqe->num_bds = 1; - 
login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ - - bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); - return 0; -} - -/** - * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware - * @conn: iscsi connection - * @mtask: driver command structure which is requesting - * a WQE to sent to chip for further processing - * - * prepare and post an iSCSI Login request WQE to CNIC firmware - */ -int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn, - struct iscsi_task *mtask) -{ - struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; - struct iscsi_tm *tmfabort_hdr; - struct scsi_cmnd *ref_sc; - struct iscsi_task *ctask; - struct bnx2i_cmd *bnx2i_cmd; - struct bnx2i_tmf_request *tmfabort_wqe; - u32 dword; - - bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data; - tmfabort_hdr = (struct iscsi_tm *)mtask->hdr; - tmfabort_wqe = (struct bnx2i_tmf_request *) - bnx2i_conn->ep->qp.sq_prod_qe; - - tmfabort_wqe->op_code = tmfabort_hdr->opcode; - tmfabort_wqe->op_attr = 0; - tmfabort_wqe->op_attr = - ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK; - tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]); - tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]); - - tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14)); - tmfabort_wqe->reserved2 = 0; - tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn); - - ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt); - if (!ctask || ctask->sc) - /* - * the iscsi layer must have completed the cmd while this - * was starting up. - */ - return 0; - ref_sc = ctask->sc; - - if (ref_sc->sc_data_direction == DMA_TO_DEVICE) - dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); - else - dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); - tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt); - tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn); - - tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; - tmfabort_wqe->bd_list_addr_hi = (u32) - ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); - tmfabort_wqe->num_bds = 1; - tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ - - bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); - return 0; -} - -/** - * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware - * @conn: iscsi connection - * @cmd: driver command structure which is requesting - * a WQE to sent to chip for further processing - * - * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware - */ -int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn, - struct bnx2i_cmd *cmd) -{ - struct bnx2i_cmd_request *scsi_cmd_wqe; - - scsi_cmd_wqe = (struct bnx2i_cmd_request *) - bnx2i_conn->ep->qp.sq_prod_qe; - memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request)); - scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ - - bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); - return 0; -} - -/** - * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware - * @conn: iscsi connection - * @cmd: driver command structure which is requesting - * a WQE to sent to chip for further processing - * @ttt: TTT to be used when building pdu header - * @datap: payload buffer pointer - * @data_len: payload data length - * @unsol: indicated whether nopout pdu is unsolicited pdu or - * in response to target's NOPIN w/ TTT != FFFFFFFF - * - * prepare and post a nopout request WQE to CNIC firmware - */ -int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn, - struct iscsi_task *task, u32 ttt, - char 
*datap, int data_len, int unsol) -{ - struct bnx2i_endpoint *ep = bnx2i_conn->ep; - struct bnx2i_cmd *bnx2i_cmd; - struct bnx2i_nop_out_request *nopout_wqe; - struct iscsi_nopout *nopout_hdr; - - bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; - nopout_hdr = (struct iscsi_nopout *)task->hdr; - nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe; - nopout_wqe->op_code = nopout_hdr->opcode; - nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL; - memcpy(nopout_wqe->lun, nopout_hdr->lun, 8); - - if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) { - u32 tmp = nopout_hdr->lun[0]; - /* 57710 requires LUN field to be swapped */ - nopout_hdr->lun[0] = nopout_hdr->lun[1]; - nopout_hdr->lun[1] = tmp; - } - - nopout_wqe->itt = ((u16)task->itt | - (ISCSI_TASK_TYPE_MPATH << - ISCSI_TMF_REQUEST_TYPE_SHIFT)); - nopout_wqe->ttt = ttt; - nopout_wqe->flags = 0; - if (!unsol) - nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; - else if (nopout_hdr->itt == RESERVED_ITT) - nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION; - - nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn); - nopout_wqe->data_length = data_len; - if (data_len) { - /* handle payload data, not required in first release */ - printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n"); - } else { - nopout_wqe->bd_list_addr_lo = (u32) - bnx2i_conn->hba->mp_bd_dma; - nopout_wqe->bd_list_addr_hi = - (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); - nopout_wqe->num_bds = 1; - } - nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ - - bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); - return 0; -} - - -/** - * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware - * @conn: iscsi connection - * @cmd: driver command structure which is requesting - * a WQE to sent to chip for further processing - * - * prepare and post logout request WQE to CNIC firmware - */ -int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn, - struct iscsi_task *task) -{ - struct bnx2i_cmd *bnx2i_cmd; - struct bnx2i_logout_request *logout_wqe; - struct iscsi_logout *logout_hdr; - - bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data; - logout_hdr = (struct iscsi_logout *)task->hdr; - - logout_wqe = (struct bnx2i_logout_request *) - bnx2i_conn->ep->qp.sq_prod_qe; - memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request)); - - logout_wqe->op_code = logout_hdr->opcode; - logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn); - logout_wqe->op_attr = - logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE; - logout_wqe->itt = ((u16)task->itt | - (ISCSI_TASK_TYPE_MPATH << - ISCSI_LOGOUT_REQUEST_TYPE_SHIFT)); - logout_wqe->data_length = 0; - logout_wqe->cid = 0; - - logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma; - logout_wqe->bd_list_addr_hi = (u32) - ((u64) bnx2i_conn->hba->mp_bd_dma >> 32); - logout_wqe->num_bds = 1; - logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */ - - bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1); - return 0; -} - - -/** - * bnx2i_update_iscsi_conn - post iSCSI logout request WQE to hardware - * @conn: iscsi connection which requires iscsi parameter update - * - * sends down iSCSI Conn Update request to move iSCSI conn to FFP - */ -void bnx2i_update_iscsi_conn(struct iscsi_conn *conn) -{ - struct bnx2i_conn *bnx2i_conn = conn->dd_data; - struct bnx2i_hba *hba = bnx2i_conn->hba; - struct kwqe *kwqe_arr[2]; - struct iscsi_kwqe_conn_update *update_wqe; - struct iscsi_kwqe_conn_update conn_update_kwqe; - - update_wqe = &conn_update_kwqe; - - update_wqe->hdr.op_code = 
ISCSI_KWQE_OPCODE_UPDATE_CONN; - update_wqe->hdr.flags = - (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); - - /* 5771x requires conn context id to be passed as is */ - if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type)) - update_wqe->context_id = bnx2i_conn->ep->ep_cid; - else - update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7); - update_wqe->conn_flags = 0; - if (conn->hdrdgst_en) - update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST; - if (conn->datadgst_en) - update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST; - if (conn->session->initial_r2t_en) - update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T; - if (conn->session->imm_data_en) - update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA; - - update_wqe->max_send_pdu_length = conn->max_xmit_dlength; - update_wqe->max_recv_pdu_length = conn->max_recv_dlength; - update_wqe->first_burst_length = conn->session->first_burst; - update_wqe->max_burst_length = conn->session->max_burst; - update_wqe->exp_stat_sn = conn->exp_statsn; - update_wqe->max_outstanding_r2ts = conn->session->max_r2t; - update_wqe->session_error_recovery_level = conn->session->erl; - iscsi_conn_printk(KERN_ALERT, conn, - "bnx2i: conn update - MBL 0x%x FBL 0x%x" - "MRDSL_I 0x%x MRDSL_T 0x%x \n", - update_wqe->max_burst_length, - update_wqe->first_burst_length, - update_wqe->max_recv_pdu_length, - update_wqe->max_send_pdu_length); - - kwqe_arr[0] = (struct kwqe *) update_wqe; - if (hba->cnic && hba->cnic->submit_kwqes) - hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); -} - - -/** - * bnx2i_ep_ofld_timer - post iSCSI logout request WQE to hardware - * @data: endpoint (transport handle) structure pointer - * - * routine to handle connection offload/destroy request timeout - */ -void bnx2i_ep_ofld_timer(unsigned long data) -{ - struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data; - - if (ep->state == EP_STATE_OFLD_START) { - printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n"); - ep->state = EP_STATE_OFLD_FAILED; - } else if (ep->state == EP_STATE_DISCONN_START) { - printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n"); - ep->state = EP_STATE_DISCONN_TIMEDOUT; - } else if (ep->state == EP_STATE_CLEANUP_START) { - printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n"); - ep->state = EP_STATE_CLEANUP_FAILED; - } - - wake_up_interruptible(&ep->ofld_wait); -} - - -static int bnx2i_power_of2(u32 val) -{ - u32 power = 0; - if (val & (val - 1)) - return power; - val--; - while (val) { - val = val >> 1; - power++; - } - return power; -} - - -/** - * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request - * @hba: adapter structure pointer - * @cmd: driver command structure which is requesting - * a WQE to sent to chip for further processing - * - * prepares and posts CONN_OFLD_REQ1/2 KWQE - */ -void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) -{ - struct bnx2i_cleanup_request *cmd_cleanup; - - cmd_cleanup = - (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe; - memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request)); - - cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST; - cmd_cleanup->itt = cmd->req.itt; - cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */ - - bnx2i_ring_dbell_update_sq_params(cmd->conn, 1); -} - - -/** - * bnx2i_send_conn_destroy - initiates iscsi connection teardown process - * @hba: adapter structure pointer - * @ep: endpoint (transport indentifier) structure - * - * this routine prepares and 
posts CONN_OFLD_REQ1/2 KWQE to initiate - * iscsi connection context clean-up process - */ -void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) -{ - struct kwqe *kwqe_arr[2]; - struct iscsi_kwqe_conn_destroy conn_cleanup; - - memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy)); - - conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN; - conn_cleanup.hdr.flags = - (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); - /* 5771x requires conn context id to be passed as is */ - if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) - conn_cleanup.context_id = ep->ep_cid; - else - conn_cleanup.context_id = (ep->ep_cid >> 7); - - conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid; - - kwqe_arr[0] = (struct kwqe *) &conn_cleanup; - if (hba->cnic && hba->cnic->submit_kwqes) - hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1); -} - - -/** - * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process - * @hba: adapter structure pointer - * @ep: endpoint (transport indentifier) structure - * - * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE - */ -static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba, - struct bnx2i_endpoint *ep) -{ - struct kwqe *kwqe_arr[2]; - struct iscsi_kwqe_conn_offload1 ofld_req1; - struct iscsi_kwqe_conn_offload2 ofld_req2; - dma_addr_t dma_addr; - int num_kwqes = 2; - u32 *ptbl; - - ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1; - ofld_req1.hdr.flags = - (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); - - ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid; - - dma_addr = ep->qp.sq_pgtbl_phys; - ofld_req1.sq_page_table_addr_lo = (u32) dma_addr; - ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); - - dma_addr = ep->qp.cq_pgtbl_phys; - ofld_req1.cq_page_table_addr_lo = (u32) dma_addr; - ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); - - ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2; - ofld_req2.hdr.flags = - (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); - - dma_addr = ep->qp.rq_pgtbl_phys; - ofld_req2.rq_page_table_addr_lo = (u32) dma_addr; - ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32); - - ptbl = (u32 *) ep->qp.sq_pgtbl_virt; - - ofld_req2.sq_first_pte.hi = *ptbl++; - ofld_req2.sq_first_pte.lo = *ptbl; - - ptbl = (u32 *) ep->qp.cq_pgtbl_virt; - ofld_req2.cq_first_pte.hi = *ptbl++; - ofld_req2.cq_first_pte.lo = *ptbl; - - kwqe_arr[0] = (struct kwqe *) &ofld_req1; - kwqe_arr[1] = (struct kwqe *) &ofld_req2; - ofld_req2.num_additional_wqes = 0; - - if (hba->cnic && hba->cnic->submit_kwqes) - hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); -} - - -/** - * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation - * @hba: adapter structure pointer - * @ep: endpoint (transport indentifier) structure - * - * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE - */ -static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba, - struct bnx2i_endpoint *ep) -{ - struct kwqe *kwqe_arr[5]; - struct iscsi_kwqe_conn_offload1 ofld_req1; - struct iscsi_kwqe_conn_offload2 ofld_req2; - struct iscsi_kwqe_conn_offload3 ofld_req3[1]; - dma_addr_t dma_addr; - int num_kwqes = 2; - u32 *ptbl; - - ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1; - ofld_req1.hdr.flags = - (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT); - - ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid; - - dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE; - 
-/**
- * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
- * @hba: adapter structure pointer
- * @ep: endpoint (transport identifier) structure
- *
- * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
- */
-static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
-					   struct bnx2i_endpoint *ep)
-{
-	struct kwqe *kwqe_arr[5];
-	struct iscsi_kwqe_conn_offload1 ofld_req1;
-	struct iscsi_kwqe_conn_offload2 ofld_req2;
-	struct iscsi_kwqe_conn_offload3 ofld_req3[1];
-	dma_addr_t dma_addr;
-	int num_kwqes = 2;
-	u32 *ptbl;
-
-	ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
-	ofld_req1.hdr.flags =
-		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
-
-	ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
-
-	dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
-	ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
-	ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
-
-	dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
-	ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
-	ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
-
-	ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
-	ofld_req2.hdr.flags =
-		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
-
-	dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
-	ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
-	ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
-
-	ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
-	ofld_req2.sq_first_pte.hi = *ptbl++;
-	ofld_req2.sq_first_pte.lo = *ptbl;
-
-	ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
-	ofld_req2.cq_first_pte.hi = *ptbl++;
-	ofld_req2.cq_first_pte.lo = *ptbl;
-
-	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
-	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
-
-	ofld_req2.num_additional_wqes = 1;
-	memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
-	ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
-	ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
-	ofld_req3[0].qp_first_pte[0].lo = *ptbl;
-
-	kwqe_arr[2] = (struct kwqe *) ofld_req3;
-	/* need if we decide to go with multiple KCQE's per conn */
-	num_kwqes += 1;
-
-	if (hba->cnic && hba->cnic->submit_kwqes)
-		hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
-}
-
-/**
- * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
- *
- * @hba: adapter structure pointer
- * @ep: endpoint (transport identifier) structure
- *
- * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
- */
-void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
-{
-	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
-		bnx2i_5771x_send_conn_ofld_req(hba, ep);
-	else
-		bnx2i_570x_send_conn_ofld_req(hba, ep);
-}
-
-
-/**
- * setup_qp_page_tables - iscsi QP page table setup function
- * @ep: endpoint (transport identifier) structure
- *
- * Sets up page tables for SQ/RQ/CQ, 1G/sec (5706/5708/5709) devices require
- *	64-bit addresses in big endian format. 
Whereas 10G/sec (57710) requires - * PT in little endian format - */ -static void setup_qp_page_tables(struct bnx2i_endpoint *ep) -{ - int num_pages; - u32 *ptbl; - dma_addr_t page; - int cnic_dev_10g; - - if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) - cnic_dev_10g = 1; - else - cnic_dev_10g = 0; - - /* SQ page table */ - memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); - num_pages = ep->qp.sq_mem_size / PAGE_SIZE; - page = ep->qp.sq_phys; - - if (cnic_dev_10g) - ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE); - else - ptbl = (u32 *) ep->qp.sq_pgtbl_virt; - while (num_pages--) { - if (cnic_dev_10g) { - /* PTE is written in little endian format for 57710 */ - *ptbl = (u32) page; - ptbl++; - *ptbl = (u32) ((u64) page >> 32); - ptbl++; - page += PAGE_SIZE; - } else { - /* PTE is written in big endian format for - * 5706/5708/5709 devices */ - *ptbl = (u32) ((u64) page >> 32); - ptbl++; - *ptbl = (u32) page; - ptbl++; - page += PAGE_SIZE; - } - } - - /* RQ page table */ - memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); - num_pages = ep->qp.rq_mem_size / PAGE_SIZE; - page = ep->qp.rq_phys; - - if (cnic_dev_10g) - ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE); - else - ptbl = (u32 *) ep->qp.rq_pgtbl_virt; - while (num_pages--) { - if (cnic_dev_10g) { - /* PTE is written in little endian format for 57710 */ - *ptbl = (u32) page; - ptbl++; - *ptbl = (u32) ((u64) page >> 32); - ptbl++; - page += PAGE_SIZE; - } else { - /* PTE is written in big endian format for - * 5706/5708/5709 devices */ - *ptbl = (u32) ((u64) page >> 32); - ptbl++; - *ptbl = (u32) page; - ptbl++; - page += PAGE_SIZE; - } - } - - /* CQ page table */ - memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); - num_pages = ep->qp.cq_mem_size / PAGE_SIZE; - page = ep->qp.cq_phys; - - if (cnic_dev_10g) - ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE); - else - ptbl = (u32 *) ep->qp.cq_pgtbl_virt; - while (num_pages--) { - if (cnic_dev_10g) { - /* PTE is written in little endian format for 57710 */ - *ptbl = (u32) page; - ptbl++; - *ptbl = (u32) ((u64) page >> 32); - ptbl++; - page += PAGE_SIZE; - } else { - /* PTE is written in big endian format for - * 5706/5708/5709 devices */ - *ptbl = (u32) ((u64) page >> 32); - ptbl++; - *ptbl = (u32) page; - ptbl++; - page += PAGE_SIZE; - } - } -} - - -/** - * bnx2i_alloc_qp_resc - allocates required resources for QP. - * @hba: adapter structure pointer - * @ep: endpoint (transport indentifier) structure - * - * Allocate QP (transport layer for iSCSI connection) resources, DMA'able - * memory for SQ/RQ/CQ and page tables. 
EP structure elements such - * as producer/consumer indexes/pointers, queue sizes and page table - * contents are setup - */ -int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) -{ - struct bnx2i_5771x_cq_db *cq_db; - - ep->hba = hba; - ep->conn = NULL; - ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0; - - /* Allocate page table memory for SQ which is page aligned */ - ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; - ep->qp.sq_mem_size = - (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; - ep->qp.sq_pgtbl_size = - (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *); - ep->qp.sq_pgtbl_size = - (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; - - ep->qp.sq_pgtbl_virt = - dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, - &ep->qp.sq_pgtbl_phys, GFP_KERNEL); - if (!ep->qp.sq_pgtbl_virt) { - printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n", - ep->qp.sq_pgtbl_size); - goto mem_alloc_err; - } - - /* Allocate memory area for actual SQ element */ - ep->qp.sq_virt = - dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size, - &ep->qp.sq_phys, GFP_KERNEL); - if (!ep->qp.sq_virt) { - printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n", - ep->qp.sq_mem_size); - goto mem_alloc_err; - } - - memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size); - ep->qp.sq_first_qe = ep->qp.sq_virt; - ep->qp.sq_prod_qe = ep->qp.sq_first_qe; - ep->qp.sq_cons_qe = ep->qp.sq_first_qe; - ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1]; - ep->qp.sq_prod_idx = 0; - ep->qp.sq_cons_idx = 0; - ep->qp.sqe_left = hba->max_sqes; - - /* Allocate page table memory for CQ which is page aligned */ - ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; - ep->qp.cq_mem_size = - (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; - ep->qp.cq_pgtbl_size = - (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *); - ep->qp.cq_pgtbl_size = - (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; - - ep->qp.cq_pgtbl_virt = - dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, - &ep->qp.cq_pgtbl_phys, GFP_KERNEL); - if (!ep->qp.cq_pgtbl_virt) { - printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n", - ep->qp.cq_pgtbl_size); - goto mem_alloc_err; - } - - /* Allocate memory area for actual CQ element */ - ep->qp.cq_virt = - dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size, - &ep->qp.cq_phys, GFP_KERNEL); - if (!ep->qp.cq_virt) { - printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n", - ep->qp.cq_mem_size); - goto mem_alloc_err; - } - memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size); - - ep->qp.cq_first_qe = ep->qp.cq_virt; - ep->qp.cq_prod_qe = ep->qp.cq_first_qe; - ep->qp.cq_cons_qe = ep->qp.cq_first_qe; - ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1]; - ep->qp.cq_prod_idx = 0; - ep->qp.cq_cons_idx = 0; - ep->qp.cqe_left = hba->max_cqes; - ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN; - ep->qp.cqe_size = hba->max_cqes; - - /* Invalidate all EQ CQE index, req only for 57710 */ - cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt; - memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS); - - /* Allocate page table memory for RQ which is page aligned */ - ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; - ep->qp.rq_mem_size = - (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; - ep->qp.rq_pgtbl_size = - (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *); - ep->qp.rq_pgtbl_size = - (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; - - ep->qp.rq_pgtbl_virt = - dma_alloc_coherent(&hba->pcidev->dev, 
ep->qp.rq_pgtbl_size,
-				   &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
-	if (!ep->qp.rq_pgtbl_virt) {
-		printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
-		       ep->qp.rq_pgtbl_size);
-		goto mem_alloc_err;
-	}
-
-	/* Allocate memory area for actual RQ element */
-	ep->qp.rq_virt =
-		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
-				   &ep->qp.rq_phys, GFP_KERNEL);
-	if (!ep->qp.rq_virt) {
-		printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
-		       ep->qp.rq_mem_size);
-		goto mem_alloc_err;
-	}
-
-	ep->qp.rq_first_qe = ep->qp.rq_virt;
-	ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
-	ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
-	ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
-	ep->qp.rq_prod_idx = 0x8000;
-	ep->qp.rq_cons_idx = 0;
-	ep->qp.rqe_left = hba->max_rqes;
-
-	setup_qp_page_tables(ep);
-
-	return 0;
-
-mem_alloc_err:
-	bnx2i_free_qp_resc(hba, ep);
-	return -ENOMEM;
-}
-
-
-/**
- * bnx2i_free_qp_resc - free memory resources held by QP
- * @hba: adapter structure pointer
- * @ep: endpoint (transport identifier) structure
- *
- * Free QP resources - SQ/RQ/CQ memory and page tables.
- */
-void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
-{
-	if (ep->qp.ctx_base) {
-		iounmap(ep->qp.ctx_base);
-		ep->qp.ctx_base = NULL;
-	}
-	/* Free SQ mem */
-	if (ep->qp.sq_pgtbl_virt) {
-		dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
-				  ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys);
-		ep->qp.sq_pgtbl_virt = NULL;
-		ep->qp.sq_pgtbl_phys = 0;
-	}
-	if (ep->qp.sq_virt) {
-		dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
-				  ep->qp.sq_virt, ep->qp.sq_phys);
-		ep->qp.sq_virt = NULL;
-		ep->qp.sq_phys = 0;
-	}
-
-	/* Free RQ mem */
-	if (ep->qp.rq_pgtbl_virt) {
-		dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
-				  ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys);
-		ep->qp.rq_pgtbl_virt = NULL;
-		ep->qp.rq_pgtbl_phys = 0;
-	}
-	if (ep->qp.rq_virt) {
-		dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
-				  ep->qp.rq_virt, ep->qp.rq_phys);
-		ep->qp.rq_virt = NULL;
-		ep->qp.rq_phys = 0;
-	}
-
-	/* Free CQ mem */
-	if (ep->qp.cq_pgtbl_virt) {
-		dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
-				  ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys);
-		ep->qp.cq_pgtbl_virt = NULL;
-		ep->qp.cq_pgtbl_phys = 0;
-	}
-	if (ep->qp.cq_virt) {
-		dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
-				  ep->qp.cq_virt, ep->qp.cq_phys);
-		ep->qp.cq_virt = NULL;
-		ep->qp.cq_phys = 0;
-	}
-}
-
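Note how bnx2i_free_qp_resc() mirrors bnx2i_alloc_qp_resc(): every dma_alloc_coherent() is paired with a dma_free_coherent() carrying the same size/virt/phys triple, and each queue size is first rounded up to a whole number of pages. A condensed sketch of one such allocation, with the helper name invented for illustration:

	/* one page-aligned coherent queue buffer (sketch only) */
	static int alloc_queue_buf(struct device *dev, size_t nbytes,
				   void **virt, dma_addr_t *phys)
	{
		size_t sz = PAGE_ALIGN(nbytes); /* == (n + PAGE_SIZE - 1) & PAGE_MASK */

		*virt = dma_alloc_coherent(dev, sz, phys, GFP_KERNEL);
		return *virt ? 0 : -ENOMEM; /* undo with dma_free_coherent(dev, sz, *virt, *phys) */
	}
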
-/**
- * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w
- * @hba: adapter structure pointer
- *
- * Send down iscsi_init KWQEs which initiate the initial handshake with the f/w.
- * This results in iSCSI support validation and on-chip context manager
- * initialization. Firmware completes this handshake with a CQE carrying
- * the result of iscsi support validation. Parameters carried by the
- * iscsi init request determine the number of offloaded connections and the
- * tolerance level for iscsi protocol violations this hba/chip can support
- */
-int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
-{
-	struct kwqe *kwqe_arr[3];
-	struct iscsi_kwqe_init1 iscsi_init;
-	struct iscsi_kwqe_init2 iscsi_init2;
-	int rc = 0;
-	u64 mask64;
-
-	bnx2i_adjust_qp_size(hba);
-
-	iscsi_init.flags =
-		ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
-	if (en_tcp_dack)
-		iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
-	iscsi_init.reserved0 = 0;
-	iscsi_init.num_cqs = 1;
-	iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1;
-	iscsi_init.hdr.flags =
-		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
-
-	iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
-	iscsi_init.dummy_buffer_addr_hi =
-		(u32) ((u64) hba->dummy_buf_dma >> 32);
-
-	hba->ctx_ccell_tasks =
-			((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
-	iscsi_init.num_ccells_per_conn = hba->num_ccell;
-	iscsi_init.num_tasks_per_conn = hba->max_sqes;
-	iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
-	iscsi_init.sq_num_wqes = hba->max_sqes;
-	iscsi_init.cq_log_wqes_per_page =
-		(u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
-	iscsi_init.cq_num_wqes = hba->max_cqes;
-	iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
-				   (PAGE_SIZE - 1)) / PAGE_SIZE;
-	iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
-				   (PAGE_SIZE - 1)) / PAGE_SIZE;
-	iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
-	iscsi_init.rq_num_wqes = hba->max_rqes;
-
-	iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2;
-	iscsi_init2.hdr.flags =
-		(ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
-	iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1;
-	mask64 = 0x0ULL;
-	mask64 |= (
-		/* CISCO MDS */
-		(1ULL <<
-		  ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) |
-		/* HP MSA1510i */
-		(1ULL <<
-		  ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
-		/* EMC */
-		(1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
-	if (error_mask1)
-		iscsi_init2.error_bit_map[0] = error_mask1;
-	else
-		iscsi_init2.error_bit_map[0] = (u32) mask64;
-
-	if (error_mask2)
-		iscsi_init2.error_bit_map[1] = error_mask2;
-	else
-		iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
-
-	iscsi_error_mask = mask64;
-
-	kwqe_arr[0] = (struct kwqe *) &iscsi_init;
-	kwqe_arr[1] = (struct kwqe *) &iscsi_init2;
-
-	if (hba->cnic && hba->cnic->submit_kwqes)
-		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2);
-	return rc;
-}
-
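The 64-bit mask64 assembled above ends up in two 32-bit KWQE words; the split is a plain truncate and shift, sketched here with made-up bit positions:

	u64 mask64 = (1ULL << 12) | (1ULL << 37);	/* illustrative bits only */
	u32 map0 = (u32) mask64;		/* error_bit_map[0] <- bits 0..31 */
	u32 map1 = (u32) (mask64 >> 32);	/* error_bit_map[1] <- bits 32..63 */
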
-/**
- * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion
- * @session: iscsi session pointer
- * @bnx2i_conn: iscsi connection pointer
- * @cqe: pointer to newly DMA'ed CQE entry for processing
- *
- * process SCSI CMD Response CQE & complete the request to SCSI-ML
- */
-static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
-				       struct bnx2i_conn *bnx2i_conn,
-				       struct cqe *cqe)
-{
-	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
-	struct bnx2i_cmd_response *resp_cqe;
-	struct bnx2i_cmd *bnx2i_cmd;
-	struct iscsi_task *task;
-	struct iscsi_cmd_rsp *hdr;
-	u32 datalen = 0;
-
-	resp_cqe = (struct bnx2i_cmd_response *)cqe;
-	spin_lock(&session->lock);
-	task = iscsi_itt_to_task(conn,
-				 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
-	if (!task)
-		goto fail;
-
-	bnx2i_cmd = task->dd_data;
-
-	if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
-		conn->datain_pdus_cnt +=
-			resp_cqe->task_stat.read_stat.num_data_outs;
-		conn->rxdata_octets +=
-			bnx2i_cmd->req.total_data_transfer_length;
-	} else {
-		conn->dataout_pdus_cnt +=
-			resp_cqe->task_stat.read_stat.num_data_outs;
-		conn->r2t_pdus_cnt +=
-			resp_cqe->task_stat.read_stat.num_r2ts;
-		conn->txdata_octets +=
-			bnx2i_cmd->req.total_data_transfer_length;
-	}
-	bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
-
-	hdr = (struct iscsi_cmd_rsp *)task->hdr;
-	resp_cqe = (struct bnx2i_cmd_response *)cqe;
-	hdr->opcode = resp_cqe->op_code;
-	hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
-	hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn);
-	hdr->response = resp_cqe->response;
-	hdr->cmd_status = resp_cqe->status;
-	hdr->flags = resp_cqe->response_flags;
-	hdr->residual_count = cpu_to_be32(resp_cqe->residual_count);
-
-	if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN)
-		goto done;
-
-	if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) {
-		datalen = resp_cqe->data_length;
-		if (datalen < 2)
-			goto done;
-
-		if (datalen > BNX2I_RQ_WQE_SIZE) {
-			iscsi_conn_printk(KERN_ERR, conn,
-					  "sense data len %d > RQ sz\n",
-					  datalen);
-			datalen = BNX2I_RQ_WQE_SIZE;
-		} else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) {
-			iscsi_conn_printk(KERN_ERR, conn,
-					  "sense data len %d > conn data\n",
-					  datalen);
-			datalen = ISCSI_DEF_MAX_RECV_SEG_LEN;
-		}
-
-		bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen);
-		bnx2i_put_rq_buf(bnx2i_cmd->conn, 1);
-	}
-
-done:
-	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
-			     conn->data, datalen);
-fail:
-	spin_unlock(&session->lock);
-	return 0;
-}
-
-
-/**
- * bnx2i_process_login_resp - this function handles iscsi login response
- * @session: iscsi session pointer
- * @bnx2i_conn: iscsi connection pointer
- * @cqe: pointer to newly DMA'ed CQE entry for processing
- *
- * process Login Response CQE & complete it to open-iscsi user daemon
- */
-static int bnx2i_process_login_resp(struct iscsi_session *session,
-				    struct bnx2i_conn *bnx2i_conn,
-				    struct cqe *cqe)
-{
-	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
-	struct iscsi_task *task;
-	struct bnx2i_login_response *login;
-	struct iscsi_login_rsp *resp_hdr;
-	int pld_len;
-	int pad_len;
-
-	login = (struct bnx2i_login_response *) cqe;
-	spin_lock(&session->lock);
-	task = iscsi_itt_to_task(conn,
-				 login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
-	if (!task)
-		goto done;
-
-	resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
-	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
-	resp_hdr->opcode = login->op_code;
-	resp_hdr->flags = login->response_flags;
-	resp_hdr->max_version = login->version_max;
-	resp_hdr->active_version = login->version_active;
-	resp_hdr->hlength = 0;
-
-	hton24(resp_hdr->dlength, login->data_length);
-	memcpy(resp_hdr->isid, 
&login->isid_lo, 6); - resp_hdr->tsih = cpu_to_be16(login->tsih); - resp_hdr->itt = task->hdr->itt; - resp_hdr->statsn = cpu_to_be32(login->stat_sn); - resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn); - resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn); - resp_hdr->status_class = login->status_class; - resp_hdr->status_detail = login->status_detail; - pld_len = login->data_length; - bnx2i_conn->gen_pdu.resp_wr_ptr = - bnx2i_conn->gen_pdu.resp_buf + pld_len; - - pad_len = 0; - if (pld_len & 0x3) - pad_len = 4 - (pld_len % 4); - - if (pad_len) { - int i = 0; - for (i = 0; i < pad_len; i++) { - bnx2i_conn->gen_pdu.resp_wr_ptr[0] = 0; - bnx2i_conn->gen_pdu.resp_wr_ptr++; - } - } - - __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, - bnx2i_conn->gen_pdu.resp_buf, - bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf); -done: - spin_unlock(&session->lock); - return 0; -} - -/** - * bnx2i_process_tmf_resp - this function handles iscsi TMF response - * @session: iscsi session pointer - * @bnx2i_conn: iscsi connection pointer - * @cqe: pointer to newly DMA'ed CQE entry for processing - * - * process iSCSI TMF Response CQE and wake up the driver eh thread. - */ -static int bnx2i_process_tmf_resp(struct iscsi_session *session, - struct bnx2i_conn *bnx2i_conn, - struct cqe *cqe) -{ - struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; - struct iscsi_task *task; - struct bnx2i_tmf_response *tmf_cqe; - struct iscsi_tm_rsp *resp_hdr; - - tmf_cqe = (struct bnx2i_tmf_response *)cqe; - spin_lock(&session->lock); - task = iscsi_itt_to_task(conn, - tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX); - if (!task) - goto done; - - resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr; - memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); - resp_hdr->opcode = tmf_cqe->op_code; - resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn); - resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn); - resp_hdr->itt = task->hdr->itt; - resp_hdr->response = tmf_cqe->response; - - __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0); -done: - spin_unlock(&session->lock); - return 0; -} - -/** - * bnx2i_process_logout_resp - this function handles iscsi logout response - * @session: iscsi session pointer - * @bnx2i_conn: iscsi connection pointer - * @cqe: pointer to newly DMA'ed CQE entry for processing - * - * process iSCSI Logout Response CQE & make function call to - * notify the user daemon. 
- */
-static int bnx2i_process_logout_resp(struct iscsi_session *session,
-				     struct bnx2i_conn *bnx2i_conn,
-				     struct cqe *cqe)
-{
-	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
-	struct iscsi_task *task;
-	struct bnx2i_logout_response *logout;
-	struct iscsi_logout_rsp *resp_hdr;
-
-	logout = (struct bnx2i_logout_response *) cqe;
-	spin_lock(&session->lock);
-	task = iscsi_itt_to_task(conn,
-				 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
-	if (!task)
-		goto done;
-
-	resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
-	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
-	resp_hdr->opcode = logout->op_code;
-	resp_hdr->flags = logout->response;
-	resp_hdr->hlength = 0;
-
-	resp_hdr->itt = task->hdr->itt;
-	resp_hdr->statsn = task->hdr->exp_statsn;
-	resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn);
-	resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn);
-
-	resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait);
-	resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);
-
-	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
-done:
-	spin_unlock(&session->lock);
-	return 0;
-}
-
-/**
- * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
- * @session: iscsi session pointer
- * @bnx2i_conn: iscsi connection pointer
- * @cqe: pointer to newly DMA'ed CQE entry for processing
- *
- * process iSCSI NOPIN local completion CQE, frees ITT and command structures
- */
-static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
-					   struct bnx2i_conn *bnx2i_conn,
-					   struct cqe *cqe)
-{
-	struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
-	struct bnx2i_nop_in_msg *nop_in;
-	struct iscsi_task *task;
-
-	nop_in = (struct bnx2i_nop_in_msg *)cqe;
-	spin_lock(&session->lock);
-	task = iscsi_itt_to_task(conn,
-				 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
-	if (task)
-		iscsi_put_task(task);
-	spin_unlock(&session->lock);
-}
-
-/**
- * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
- * @bnx2i_conn: iscsi connection pointer
- *
- * Firmware advances RQ producer index for every unsolicited PDU even if
- *	payload data length is '0'. 
This function makes corresponding - * adjustments on the driver side to match this f/w behavior - */ -static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn) -{ - char dummy_rq_data[2]; - bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1); - bnx2i_put_rq_buf(bnx2i_conn, 1); -} - - -/** - * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE - * @session: iscsi session pointer - * @bnx2i_conn: iscsi connection pointer - * @cqe: pointer to newly DMA'ed CQE entry for processing - * - * process iSCSI target's proactive iSCSI NOPIN request - */ -static int bnx2i_process_nopin_mesg(struct iscsi_session *session, - struct bnx2i_conn *bnx2i_conn, - struct cqe *cqe) -{ - struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; - struct iscsi_task *task; - struct bnx2i_nop_in_msg *nop_in; - struct iscsi_nopin *hdr; - u32 itt; - int tgt_async_nop = 0; - - nop_in = (struct bnx2i_nop_in_msg *)cqe; - itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX; - - spin_lock(&session->lock); - hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr; - memset(hdr, 0, sizeof(struct iscsi_hdr)); - hdr->opcode = nop_in->op_code; - hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn); - hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn); - hdr->ttt = cpu_to_be32(nop_in->ttt); - - if (itt == (u16) RESERVED_ITT) { - bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); - hdr->itt = RESERVED_ITT; - tgt_async_nop = 1; - goto done; - } - - /* this is a response to one of our nop-outs */ - task = iscsi_itt_to_task(conn, itt); - if (task) { - hdr->flags = ISCSI_FLAG_CMD_FINAL; - hdr->itt = task->hdr->itt; - hdr->ttt = cpu_to_be32(nop_in->ttt); - memcpy(hdr->lun, nop_in->lun, 8); - } -done: - __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); - spin_unlock(&session->lock); - - return tgt_async_nop; -} - - -/** - * bnx2i_process_async_mesg - this function handles iscsi async message - * @session: iscsi session pointer - * @bnx2i_conn: iscsi connection pointer - * @cqe: pointer to newly DMA'ed CQE entry for processing - * - * process iSCSI ASYNC Message - */ -static void bnx2i_process_async_mesg(struct iscsi_session *session, - struct bnx2i_conn *bnx2i_conn, - struct cqe *cqe) -{ - struct bnx2i_async_msg *async_cqe; - struct iscsi_async *resp_hdr; - u8 async_event; - - bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); - - async_cqe = (struct bnx2i_async_msg *)cqe; - async_event = async_cqe->async_event; - - if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) { - iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, - "async: scsi events not supported\n"); - return; - } - - spin_lock(&session->lock); - resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr; - memset(resp_hdr, 0, sizeof(struct iscsi_hdr)); - resp_hdr->opcode = async_cqe->op_code; - resp_hdr->flags = 0x80; - - memcpy(resp_hdr->lun, async_cqe->lun, 8); - resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn); - resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn); - - resp_hdr->async_event = async_cqe->async_event; - resp_hdr->async_vcode = async_cqe->async_vcode; - - resp_hdr->param1 = cpu_to_be16(async_cqe->param1); - resp_hdr->param2 = cpu_to_be16(async_cqe->param2); - resp_hdr->param3 = cpu_to_be16(async_cqe->param3); - - __iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data, - (struct iscsi_hdr *)resp_hdr, NULL, 0); - spin_unlock(&session->lock); -} - - -/** - * bnx2i_process_reject_mesg - process iscsi reject pdu - * @session: iscsi session pointer - * @bnx2i_conn: iscsi connection pointer - * @cqe: pointer to newly DMA'ed CQE entry for processing - 
* - * process iSCSI REJECT message - */ -static void bnx2i_process_reject_mesg(struct iscsi_session *session, - struct bnx2i_conn *bnx2i_conn, - struct cqe *cqe) -{ - struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; - struct bnx2i_reject_msg *reject; - struct iscsi_reject *hdr; - - reject = (struct bnx2i_reject_msg *) cqe; - if (reject->data_length) { - bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length); - bnx2i_put_rq_buf(bnx2i_conn, 1); - } else - bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); - - spin_lock(&session->lock); - hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr; - memset(hdr, 0, sizeof(struct iscsi_hdr)); - hdr->opcode = reject->op_code; - hdr->reason = reject->reason; - hton24(hdr->dlength, reject->data_length); - hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn); - hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn); - hdr->ffffffff = cpu_to_be32(RESERVED_ITT); - __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data, - reject->data_length); - spin_unlock(&session->lock); -} - -/** - * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion - * @session: iscsi session pointer - * @bnx2i_conn: iscsi connection pointer - * @cqe: pointer to newly DMA'ed CQE entry for processing - * - * process command cleanup response CQE during conn shutdown or error recovery - */ -static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session, - struct bnx2i_conn *bnx2i_conn, - struct cqe *cqe) -{ - struct bnx2i_cleanup_response *cmd_clean_rsp; - struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; - struct iscsi_task *task; - - cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe; - spin_lock(&session->lock); - task = iscsi_itt_to_task(conn, - cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX); - if (!task) - printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n", - cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX); - spin_unlock(&session->lock); - complete(&bnx2i_conn->cmd_cleanup_cmpl); -} - - - -/** - * bnx2i_process_new_cqes - process newly DMA'ed CQE's - * @bnx2i_conn: iscsi connection - * - * this function is called by generic KCQ handler to process all pending CQE's - */ -static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn) -{ - struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data; - struct iscsi_session *session = conn->session; - struct qp_info *qp = &bnx2i_conn->ep->qp; - struct bnx2i_nop_in_msg *nopin; - int tgt_async_msg; - - while (1) { - nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe; - if (nopin->cq_req_sn != qp->cqe_exp_seq_sn) - break; - - if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) - break; - - tgt_async_msg = 0; - - switch (nopin->op_code) { - case ISCSI_OP_SCSI_CMD_RSP: - case ISCSI_OP_SCSI_DATA_IN: - bnx2i_process_scsi_cmd_resp(session, bnx2i_conn, - qp->cq_cons_qe); - break; - case ISCSI_OP_LOGIN_RSP: - bnx2i_process_login_resp(session, bnx2i_conn, - qp->cq_cons_qe); - break; - case ISCSI_OP_SCSI_TMFUNC_RSP: - bnx2i_process_tmf_resp(session, bnx2i_conn, - qp->cq_cons_qe); - break; - case ISCSI_OP_LOGOUT_RSP: - bnx2i_process_logout_resp(session, bnx2i_conn, - qp->cq_cons_qe); - break; - case ISCSI_OP_NOOP_IN: - if (bnx2i_process_nopin_mesg(session, bnx2i_conn, - qp->cq_cons_qe)) - tgt_async_msg = 1; - break; - case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION: - bnx2i_process_nopin_local_cmpl(session, bnx2i_conn, - qp->cq_cons_qe); - break; - case ISCSI_OP_ASYNC_EVENT: - bnx2i_process_async_mesg(session, bnx2i_conn, - qp->cq_cons_qe); - tgt_async_msg = 1; - break; - case 
ISCSI_OP_REJECT: - bnx2i_process_reject_mesg(session, bnx2i_conn, - qp->cq_cons_qe); - break; - case ISCSI_OPCODE_CLEANUP_RESPONSE: - bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn, - qp->cq_cons_qe); - break; - default: - printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", - nopin->op_code); - } - - if (!tgt_async_msg) - bnx2i_conn->ep->num_active_cmds--; - - /* clear out in production version only, till beta keep opcode - * field intact, will be helpful in debugging (context dump) - * nopin->op_code = 0; - */ - qp->cqe_exp_seq_sn++; - if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1)) - qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN; - - if (qp->cq_cons_qe == qp->cq_last_qe) { - qp->cq_cons_qe = qp->cq_first_qe; - qp->cq_cons_idx = 0; - } else { - qp->cq_cons_qe++; - qp->cq_cons_idx++; - } - } - bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE); -} - -/** - * bnx2i_fastpath_notification - process global event queue (KCQ) - * @hba: adapter structure pointer - * @new_cqe_kcqe: pointer to newly DMA'ed KCQE entry - * - * Fast path event notification handler, KCQ entry carries context id - * of the connection that has 1 or more pending CQ entries - */ -static void bnx2i_fastpath_notification(struct bnx2i_hba *hba, - struct iscsi_kcqe *new_cqe_kcqe) -{ - struct bnx2i_conn *conn; - u32 iscsi_cid; - - iscsi_cid = new_cqe_kcqe->iscsi_conn_id; - conn = bnx2i_get_conn_from_id(hba, iscsi_cid); - - if (!conn) { - printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid); - return; - } - if (!conn->ep) { - printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid); - return; - } - - bnx2i_process_new_cqes(conn); -} - - -/** - * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE - * @hba: adapter structure pointer - * @update_kcqe: kcqe pointer - * - * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration - */ -static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba, - struct iscsi_kcqe *update_kcqe) -{ - struct bnx2i_conn *conn; - u32 iscsi_cid; - - iscsi_cid = update_kcqe->iscsi_conn_id; - conn = bnx2i_get_conn_from_id(hba, iscsi_cid); - - if (!conn) { - printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid); - return; - } - if (!conn->ep) { - printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid); - return; - } - - if (update_kcqe->completion_status) { - printk(KERN_ALERT "request failed cid %x\n", iscsi_cid); - conn->ep->state = EP_STATE_ULP_UPDATE_FAILED; - } else - conn->ep->state = EP_STATE_ULP_UPDATE_COMPL; - - wake_up_interruptible(&conn->ep->ofld_wait); -} - - -/** - * bnx2i_recovery_que_add_conn - add connection to recovery queue - * @hba: adapter structure pointer - * @bnx2i_conn: iscsi connection - * - * Add connection to recovery queue and schedule adapter eh worker - */ -static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba, - struct bnx2i_conn *bnx2i_conn) -{ - iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data, - ISCSI_ERR_CONN_FAILED); -} - - -/** - * bnx2i_process_tcp_error - process error notification on a given connection - * - * @hba: adapter structure pointer - * @tcp_err: tcp error kcqe pointer - * - * handles tcp level error notifications from FW. 
- */ -static void bnx2i_process_tcp_error(struct bnx2i_hba *hba, - struct iscsi_kcqe *tcp_err) -{ - struct bnx2i_conn *bnx2i_conn; - u32 iscsi_cid; - - iscsi_cid = tcp_err->iscsi_conn_id; - bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); - - if (!bnx2i_conn) { - printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid); - return; - } - - printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n", - iscsi_cid, tcp_err->completion_status); - bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn); -} - - -/** - * bnx2i_process_iscsi_error - process error notification on a given connection - * @hba: adapter structure pointer - * @iscsi_err: iscsi error kcqe pointer - * - * handles iscsi error notifications from the FW. Firmware based in initial - * handshake classifies iscsi protocol / TCP rfc violation into either - * warning or error indications. If indication is of "Error" type, driver - * will initiate session recovery for that connection/session. For - * "Warning" type indication, driver will put out a system log message - * (there will be only one message for each type for the life of the - * session, this is to avoid un-necessarily overloading the system) - */ -static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba, - struct iscsi_kcqe *iscsi_err) -{ - struct bnx2i_conn *bnx2i_conn; - u32 iscsi_cid; - char warn_notice[] = "iscsi_warning"; - char error_notice[] = "iscsi_error"; - char additional_notice[64]; - char *message; - int need_recovery; - u64 err_mask64; - - iscsi_cid = iscsi_err->iscsi_conn_id; - bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid); - if (!bnx2i_conn) { - printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid); - return; - } - - err_mask64 = (0x1ULL << iscsi_err->completion_status); - - if (err_mask64 & iscsi_error_mask) { - need_recovery = 0; - message = warn_notice; - } else { - need_recovery = 1; - message = error_notice; - } - - switch (iscsi_err->completion_status) { - case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR: - strcpy(additional_notice, "hdr digest err"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR: - strcpy(additional_notice, "data digest err"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE: - strcpy(additional_notice, "wrong opcode rcvd"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN: - strcpy(additional_notice, "AHS len > 0 rcvd"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT: - strcpy(additional_notice, "invalid ITT rcvd"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN: - strcpy(additional_notice, "wrong StatSN rcvd"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN: - strcpy(additional_notice, "wrong DataSN rcvd"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T: - strcpy(additional_notice, "pend R2T violation"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0: - strcpy(additional_notice, "ERL0, UO"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1: - strcpy(additional_notice, "ERL0, U1"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2: - strcpy(additional_notice, "ERL0, U2"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3: - strcpy(additional_notice, "ERL0, U3"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4: - strcpy(additional_notice, "ERL0, U4"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5: - strcpy(additional_notice, "ERL0, U5"); - break; - case 
ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6: - strcpy(additional_notice, "ERL0, U6"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN: - strcpy(additional_notice, "invalid resi len"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN: - strcpy(additional_notice, "MRDSL violation"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO: - strcpy(additional_notice, "F-bit not set"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV: - strcpy(additional_notice, "invalid TTT"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN: - strcpy(additional_notice, "invalid DataSN"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN: - strcpy(additional_notice, "burst len violation"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF: - strcpy(additional_notice, "buf offset violation"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN: - strcpy(additional_notice, "invalid LUN field"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN: - strcpy(additional_notice, "invalid R2TSN field"); - break; -#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0 \ - ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 - case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0: - strcpy(additional_notice, "invalid cmd len1"); - break; -#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1 \ - ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 - case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1: - strcpy(additional_notice, "invalid cmd len2"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED: - strcpy(additional_notice, - "pend r2t exceeds MaxOutstandingR2T value"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV: - strcpy(additional_notice, "TTT is rsvd"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN: - strcpy(additional_notice, "MBL violation"); - break; -#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO \ - ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO - case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO: - strcpy(additional_notice, "data seg len != 0"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN: - strcpy(additional_notice, "reject pdu len error"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN: - strcpy(additional_notice, "async pdu len error"); - break; - case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN: - strcpy(additional_notice, "nopin pdu len error"); - break; -#define BNX2_ERR_PEND_R2T_IN_CLEANUP \ - ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP - case BNX2_ERR_PEND_R2T_IN_CLEANUP: - strcpy(additional_notice, "pend r2t in cleanup"); - break; - - case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT: - strcpy(additional_notice, "IP fragments rcvd"); - break; - case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS: - strcpy(additional_notice, "IP options error"); - break; - case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG: - strcpy(additional_notice, "urgent flag error"); - break; - default: - printk(KERN_ALERT "iscsi_err - unknown err %x\n", - iscsi_err->completion_status); - } - - if (need_recovery) { - iscsi_conn_printk(KERN_ALERT, - bnx2i_conn->cls_conn->dd_data, - "bnx2i: %s - %s\n", - message, additional_notice); - - iscsi_conn_printk(KERN_ALERT, - bnx2i_conn->cls_conn->dd_data, - "conn_err - hostno %d conn %p, " - "iscsi_cid %x cid %x\n", - bnx2i_conn->hba->shost->host_no, - bnx2i_conn, 
bnx2i_conn->ep->ep_iscsi_cid,
-				  bnx2i_conn->ep->ep_cid);
-		bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
-	} else
-		if (!test_and_set_bit(iscsi_err->completion_status,
-				      (void *) &bnx2i_conn->violation_notified))
-			iscsi_conn_printk(KERN_ALERT,
-					  bnx2i_conn->cls_conn->dd_data,
-					  "bnx2i: %s - %s\n",
-					  message, additional_notice);
-}
-
-
-/**
- * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
- * @hba: adapter structure pointer
- * @conn_destroy: conn destroy kcqe pointer
- *
- * handles connection destroy completion request.
- */
-static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
-					    struct iscsi_kcqe *conn_destroy)
-{
-	struct bnx2i_endpoint *ep;
-
-	ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
-	if (!ep) {
-		printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
-				  "offload request, unexpected completion\n");
-		return;
-	}
-
-	if (hba != ep->hba) {
-		printk(KERN_ALERT "conn destroy - error hba mis-match\n");
-		return;
-	}
-
-	if (conn_destroy->completion_status) {
-		printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
-		ep->state = EP_STATE_CLEANUP_FAILED;
-	} else
-		ep->state = EP_STATE_CLEANUP_CMPL;
-	wake_up_interruptible(&ep->ofld_wait);
-}
-
-
-/**
- * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
- * @hba: adapter structure pointer
- * @ofld_kcqe: conn offload kcqe pointer
- *
- * handles initial connection offload completion, ep_connect() thread is
- *	woken-up to continue with LLP connect process
- */
-static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
-				    struct iscsi_kcqe *ofld_kcqe)
-{
-	u32 cid_addr;
-	struct bnx2i_endpoint *ep;
-	u32 cid_num;
-
-	ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
-	if (!ep) {
-		printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
-		return;
-	}
-
-	if (hba != ep->hba) {
-		printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
-		return;
-	}
-
-	if (ofld_kcqe->completion_status) {
-		if (ofld_kcqe->completion_status ==
-		    ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
-			printk(KERN_ALERT "bnx2i: unable to allocate"
-					  " iSCSI context resources\n");
-		ep->state = EP_STATE_OFLD_FAILED;
-	} else {
-		ep->state = EP_STATE_OFLD_COMPL;
-		cid_addr = ofld_kcqe->iscsi_conn_context_id;
-		cid_num = bnx2i_get_cid_num(ep);
-		ep->ep_cid = cid_addr;
-		ep->qp.ctx_base = NULL;
-	}
-	wake_up_interruptible(&ep->ofld_wait);
-}
-
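bnx2i_indicate_kcqe() below fans KCQ entries out by opcode with an if/else chain; the same dispatch written as a switch, shown purely as a sketch of the control flow (the handler names are the ones defined in this file):

	switch (ikcqe->op_code) {
	case ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
		bnx2i_fastpath_notification(hba, ikcqe);
		break;
	case ISCSI_KCQE_OPCODE_OFFLOAD_CONN:
		bnx2i_process_ofld_cmpl(hba, ikcqe);
		break;
	case ISCSI_KCQE_OPCODE_UPDATE_CONN:
		bnx2i_process_update_conn_cmpl(hba, ikcqe);
		break;
	/* INIT, DESTROY_CONN, ISCSI_ERROR and TCP_ERROR cases likewise */
	default:
		printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n", ikcqe->op_code);
	}
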
-/**
- * bnx2i_indicate_kcqe - process KCQ entries posted by the chip
- * @context: adapter structure pointer
- * @kcqe: array of pending kcqe pointers
- * @num_cqe: number of kcqes to process
- *
- * Generic KCQ event handler/dispatcher
- */
-static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
-				u32 num_cqe)
-{
-	struct bnx2i_hba *hba = context;
-	int i = 0;
-	struct iscsi_kcqe *ikcqe = NULL;
-
-	while (i < num_cqe) {
-		ikcqe = (struct iscsi_kcqe *) kcqe[i++];
-
-		if (ikcqe->op_code ==
-		    ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION)
-			bnx2i_fastpath_notification(hba, ikcqe);
-		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN)
-			bnx2i_process_ofld_cmpl(hba, ikcqe);
-		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN)
-			bnx2i_process_update_conn_cmpl(hba, ikcqe);
-		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) {
-			if (ikcqe->completion_status !=
-			    ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
-				bnx2i_iscsi_license_error(hba,
-						ikcqe->completion_status);
-			else {
-				set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
-				bnx2i_get_link_state(hba);
-				printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
-						 "ISCSI_INIT passed\n",
-						 (u8)hba->pcidev->bus->number,
-						 hba->pci_devno,
-						 (u8)hba->pci_func);
-			}
-		} else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN)
-			bnx2i_process_conn_destroy_cmpl(hba, ikcqe);
-		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR)
-			bnx2i_process_iscsi_error(hba, ikcqe);
-		else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR)
-			bnx2i_process_tcp_error(hba, ikcqe);
-		else
-			printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
-			       ikcqe->op_code);
-	}
-}
-
-
-/**
- * bnx2i_indicate_netevent - Generic netdev event handler
- * @context: adapter structure pointer
- * @event: event type
- *
- * Handles four netdev events, NETDEV_UP, NETDEV_DOWN,
- *	NETDEV_GOING_DOWN and NETDEV_CHANGE
- */
-static void bnx2i_indicate_netevent(void *context, unsigned long event)
-{
-	struct bnx2i_hba *hba = context;
-
-	switch (event) {
-	case NETDEV_UP:
-		if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
-			bnx2i_send_fw_iscsi_init_msg(hba);
-		break;
-	case NETDEV_DOWN:
-		clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
-		clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
-		break;
-	case NETDEV_GOING_DOWN:
-		set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
-		iscsi_host_for_each_session(hba->shost,
-					    bnx2i_drop_session);
-		break;
-	case NETDEV_CHANGE:
-		bnx2i_get_link_state(hba);
-		break;
-	default:
-		;
-	}
-}
-
-
-/**
- * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion
- * @cm_sk: cnic sock structure pointer
- *
- * function callback exported via bnx2i - cnic driver interface to
- *	indicate completion of option-2 TCP connect request.
- */
-static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk)
-{
-	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
-
-	if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
-		ep->state = EP_STATE_CONNECT_FAILED;
-	else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags))
-		ep->state = EP_STATE_CONNECT_COMPL;
-	else
-		ep->state = EP_STATE_CONNECT_FAILED;
-
-	wake_up_interruptible(&ep->ofld_wait);
-}
-
-
-/**
- * bnx2i_cm_close_cmpl - process tcp conn close completion
- * @cm_sk: cnic sock structure pointer
- *
- * function callback exported via bnx2i - cnic driver interface to
- *	indicate completion of option-2 graceful TCP connect shutdown
- */
-static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk)
-{
-	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
-
-	ep->state = EP_STATE_DISCONN_COMPL;
-	wake_up_interruptible(&ep->ofld_wait);
-}
-
-
-/**
- * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion
- * @cm_sk: cnic sock structure pointer
- *
- * function callback exported via bnx2i - cnic driver interface to
- *	indicate completion of option-2 abortive TCP connect termination
- */
-static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
-{
-	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
-
-	ep->state = EP_STATE_DISCONN_COMPL;
-	wake_up_interruptible(&ep->ofld_wait);
-}
-
-
-/**
- * bnx2i_cm_remote_close - process received TCP FIN
- * @cm_sk: cnic sock structure pointer
- *
- * function callback exported via bnx2i - cnic driver interface to indicate
- *	async TCP events such as FIN
- */
-static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
-{
-	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
-
-	ep->state = EP_STATE_TCP_FIN_RCVD;
-	if (ep->conn)
-		bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
-}
-
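Each cm_* callback here follows one completion pattern: record the outcome in ep->state, then wake whatever thread sleeps on ep->ofld_wait. The sleeping side pairs with it roughly as below; this is a sketch only, and the state name and timeout are illustrative rather than taken from this patch:

	/* connect path: block until a cm_* callback reports an outcome */
	wait_event_interruptible_timeout(ep->ofld_wait,
					 ep->state != EP_STATE_CONNECT_START,
					 msecs_to_jiffies(10 * 1000));
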
-/**
- * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
- * @cm_sk: cnic sock structure pointer
- *
- * function callback exported via bnx2i - cnic driver interface to
- *	indicate async TCP events (RST) sent by the peer.
- */
-static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
-{
-	struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
-
-	ep->state = EP_STATE_TCP_RST_RCVD;
-	if (ep->conn)
-		bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
-}
-
-
-static void bnx2i_send_nl_mesg(struct cnic_dev *dev, u32 msg_type,
-			       char *buf, u16 buflen)
-{
-	struct bnx2i_hba *hba;
-
-	hba = bnx2i_find_hba_for_cnic(dev);
-	if (!hba)
-		return;
-
-	if (iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport,
-			       msg_type, buf, buflen))
-		printk(KERN_ALERT "bnx2i: private nl message send error\n");
-}
-
-
-/**
- * bnx2i_cnic_cb - global template of bnx2i - cnic driver interface structure
- *			carrying callback function pointers
- */
-struct cnic_ulp_ops bnx2i_cnic_cb = {
-	.cnic_init = bnx2i_ulp_init,
-	.cnic_exit = bnx2i_ulp_exit,
-	.cnic_start = bnx2i_start,
-	.cnic_stop = bnx2i_stop,
-	.indicate_kcqes = bnx2i_indicate_kcqe,
-	.indicate_netevent = bnx2i_indicate_netevent,
-	.cm_connect_complete = bnx2i_cm_connect_cmpl,
-	.cm_close_complete = bnx2i_cm_close_cmpl,
-	.cm_abort_complete = bnx2i_cm_abort_cmpl,
-	.cm_remote_close = bnx2i_cm_remote_close,
-	.cm_remote_abort = bnx2i_cm_remote_abort,
-	.iscsi_nl_send_msg = bnx2i_send_nl_mesg,
-	.owner = THIS_MODULE
-};
-
-
-/**
- * bnx2i_map_ep_dbell_regs - map connection doorbell registers
- * @ep: bnx2i endpoint
- *
- * maps connection's SQ and RQ doorbell registers; 5706/5708/5709 host these
- *	registers in BAR #0, whereas in 57710 these registers are accessed by
- *	mapping BAR #1
- */
-int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
-{
-	u32 cid_num;
-	u32 reg_off;
-	u32 first_l4l5;
-	u32 ctx_sz;
-	u32 config2;
-	resource_size_t reg_base;
-
-	cid_num = bnx2i_get_cid_num(ep);
-
-	if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
-		reg_base = pci_resource_start(ep->hba->pcidev,
-					      BNX2X_DOORBELL_PCI_BAR);
-		reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE;
-		ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
-		goto arm_cq;
-	}
-
-	reg_base = ep->hba->netdev->base_addr;
-	if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
-	    (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
-		config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
-		first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5;
-		ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
-		if (ctx_sz)
-			reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
-				  + PAGE_SIZE *
-				  (((cid_num - first_l4l5) / ctx_sz) + 256);
-		else
-			reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
-	} else
-		/* 5709 device in normal mode and 5706/5708 devices */
-		reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
-
-	ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
-					  MB_KERNEL_CTX_SIZE);
-	if (!ep->qp.ctx_base)
-		return -ENOMEM;
-
-arm_cq:
-	bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
-	return 0;
-}
diff --git a/trunk/drivers/scsi/bnx2i/bnx2i_init.c b/trunk/drivers/scsi/bnx2i/bnx2i_init.c
deleted file mode 100644
index ae4b2d588fd3..000000000000
--- a/trunk/drivers/scsi/bnx2i/bnx2i_init.c
+++ /dev/null
@@ -1,438 +0,0 @@
-/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
- *
- * Copyright (c) 2006 - 2009 Broadcom Corporation
- * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. 
- * Copyright (c) 2007, 2008 Mike Christie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
- *
- * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
- */
-
-#include "bnx2i.h"
-
-static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
-static u32 adapter_count;
-static int bnx2i_reg_device;
-
-#define DRV_MODULE_NAME		"bnx2i"
-#define DRV_MODULE_VERSION	"2.0.1d"
-#define DRV_MODULE_RELDATE	"Mar 25, 2009"
-
-static char version[] __devinitdata =
-		"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
-		" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
-
-MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_MODULE_VERSION);
-
-static DEFINE_RWLOCK(bnx2i_dev_lock);
-
-unsigned int event_coal_div = 1;
-module_param(event_coal_div, int, 0664);
-MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
-
-unsigned int en_tcp_dack = 1;
-module_param(en_tcp_dack, int, 0664);
-MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
-
-unsigned int error_mask1 = 0x00;
-module_param(error_mask1, int, 0664);
-MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
-
-unsigned int error_mask2 = 0x00;
-module_param(error_mask2, int, 0664);
-MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
-
-unsigned int sq_size;
-module_param(sq_size, int, 0664);
-MODULE_PARM_DESC(sq_size, "Configure SQ size");
-
-unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT;
-module_param(rq_size, int, 0664);
-MODULE_PARM_DESC(rq_size, "Configure RQ size");
-
-u64 iscsi_error_mask = 0x00;
-
-static void bnx2i_unreg_one_device(struct bnx2i_hba *hba);
-
-
-/**
- * bnx2i_identify_device - identifies NetXtreme II device type
- * @hba: Adapter structure pointer
- *
- * This function identifies the NX2 device type and sets appropriate
- *	queue mailbox register access method, 5709 requires driver to
- *	access MBOX regs using *bin* mode
- */
-void bnx2i_identify_device(struct bnx2i_hba *hba)
-{
-	hba->cnic_dev_type = 0;
-	if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) ||
-	    (hba->pci_did == PCI_DEVICE_ID_NX2_5706S))
-		set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
-	else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) ||
-	    (hba->pci_did == PCI_DEVICE_ID_NX2_5708S))
-		set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
-	else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) ||
-	    (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
-		set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
-		hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
-	} else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
-		   hba->pci_did == PCI_DEVICE_ID_NX2_57711)
-		set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
-}
-
-
-/**
- * get_adapter_list_head - returns head of adapter list
- */
-struct bnx2i_hba *get_adapter_list_head(void)
-{
-	struct bnx2i_hba *hba = NULL;
-	struct bnx2i_hba *tmp_hba;
-
-	if (!adapter_count)
-		goto hba_not_found;
-
-	read_lock(&bnx2i_dev_lock);
-	list_for_each_entry(tmp_hba, &adapter_list, link) {
-		if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) {
-			hba = tmp_hba;
-			break;
-		}
-	}
-	read_unlock(&bnx2i_dev_lock);
-hba_not_found:
-	return hba;
-}
-
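bnx2i_find_hba_for_cnic() below walks the adapter list with list_for_each_entry_safe() even though nothing is removed under the read lock; the plain iterator would be sufficient for a pure lookup, as in this sketch:

	read_lock(&bnx2i_dev_lock);
	list_for_each_entry(hba, &adapter_list, link) {
		if (hba->cnic == cnic) {
			read_unlock(&bnx2i_dev_lock);
			return hba;
		}
	}
	read_unlock(&bnx2i_dev_lock);
	return NULL;
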
-/**
- * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance
- * @cnic: pointer to cnic device instance
- *
- */
-struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
-{
-	struct bnx2i_hba *hba, *temp;
-
-	read_lock(&bnx2i_dev_lock);
-	list_for_each_entry_safe(hba, temp, &adapter_list, link) {
-		if (hba->cnic == cnic) {
-			read_unlock(&bnx2i_dev_lock);
-			return hba;
-		}
-	}
-	read_unlock(&bnx2i_dev_lock);
-	return NULL;
-}
-
-
-/**
- * bnx2i_start - cnic callback to initialize & start adapter instance
- * @handle: transparent handle pointing to adapter structure
- *
- * This function maps adapter structure to pcidev structure and initiates
- *	firmware handshake to enable/initialize on chip iscsi components
- *	This bnx2i - cnic interface api callback is issued after following
- *	2 conditions are met -
- *	  a) underlying network interface is up (marked by event 'NETDEV_UP'
- *		from netdev)
- *	  b) bnx2i adapter instance is registered
- */
-void bnx2i_start(void *handle)
-{
-#define BNX2I_INIT_POLL_TIME	(1000 / HZ)
-	struct bnx2i_hba *hba = handle;
-	int i = HZ;
-
-	bnx2i_send_fw_iscsi_init_msg(hba);
-	while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
-		msleep(BNX2I_INIT_POLL_TIME);
-}
-
-
-/**
- * bnx2i_stop - cnic callback to shutdown adapter instance
- * @handle: transparent handle pointing to adapter structure
- *
- * driver checks if adapter is already in shutdown mode, if not start
- *	the shutdown process
- */
-void bnx2i_stop(void *handle)
-{
-	struct bnx2i_hba *hba = handle;
-
-	/* check if cleanup happened in GOING_DOWN context */
-	clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
-	if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
-				&hba->adapter_state))
-		iscsi_host_for_each_session(hba->shost,
-					    bnx2i_drop_session);
-}
-
-/**
- * bnx2i_register_device - register bnx2i adapter instance with the cnic driver
- * @hba: Adapter instance to register
- *
- * registers bnx2i adapter instance with the cnic driver while holding the
- *	adapter structure lock
- */
-void bnx2i_register_device(struct bnx2i_hba *hba)
-{
-	if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
-	    test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
-		return;
-	}
-
-	hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
-
-	spin_lock(&hba->lock);
-	bnx2i_reg_device++;
-	spin_unlock(&hba->lock);
-
-	set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
-}
-
-
-/**
- * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver
- *
- * registers all bnx2i adapter instances with the cnic driver while holding
- *	the global resource lock
- */
-void bnx2i_reg_dev_all(void)
-{
-	struct bnx2i_hba *hba, *temp;
-
-	read_lock(&bnx2i_dev_lock);
-	list_for_each_entry_safe(hba, temp, &adapter_list, link)
-		bnx2i_register_device(hba);
-	read_unlock(&bnx2i_dev_lock);
-}
-
-
-/**
- * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver
- * @hba: Adapter instance to unregister
- *
- * unregisters the bnx2i adapter instance with the cnic driver while holding
- *	the adapter structure lock
- */
-static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
-{
-	if (hba->ofld_conns_active ||
-	    !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
-	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state))
-		return;
-
-	hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
-
-	spin_lock(&hba->lock);
-	bnx2i_reg_device--;
-	spin_unlock(&hba->lock);
-
-	/* ep_disconnect could come before NETDEV_DOWN, driver won't
-	 * see NETDEV_DOWN as it already unregistered itself. 
- */ - hba->adapter_state = 0; - clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); -} - -/** - * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver - * - * unregisters all bnx2i adapter instances with the cnic driver while holding - * the global resource lock - */ -void bnx2i_unreg_dev_all(void) -{ - struct bnx2i_hba *hba, *temp; - - read_lock(&bnx2i_dev_lock); - list_for_each_entry_safe(hba, temp, &adapter_list, link) - bnx2i_unreg_one_device(hba); - read_unlock(&bnx2i_dev_lock); -} - - -/** - * bnx2i_init_one - initialize an adapter instance and allocate memory resources - * @hba: bnx2i adapter instance - * @cnic: cnic device handle - * - * Global resource lock and host adapter lock is held during critical sections - * below. This routine is called from cnic_register_driver() context and - * work horse thread which does majority of device specific initialization - */ -static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic) -{ - int rc; - - read_lock(&bnx2i_dev_lock); - if (bnx2i_reg_device && - !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { - rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba); - if (rc) /* duplicate registration */ - printk(KERN_ERR "bnx2i- dev reg failed\n"); - - spin_lock(&hba->lock); - bnx2i_reg_device++; - hba->age++; - spin_unlock(&hba->lock); - - set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); - } - read_unlock(&bnx2i_dev_lock); - - write_lock(&bnx2i_dev_lock); - list_add_tail(&hba->link, &adapter_list); - adapter_count++; - write_unlock(&bnx2i_dev_lock); - return 0; -} - - -/** - * bnx2i_ulp_init - initialize an adapter instance - * @dev: cnic device handle - * - * Called from cnic_register_driver() context to initialize all enumerated - * cnic devices. This routine allocate adapter structure and other - * device specific resources. - */ -void bnx2i_ulp_init(struct cnic_dev *dev) -{ - struct bnx2i_hba *hba; - - /* Allocate a HBA structure for this device */ - hba = bnx2i_alloc_hba(dev); - if (!hba) { - printk(KERN_ERR "bnx2i init: hba initialization failed\n"); - return; - } - - /* Get PCI related information and update hba struct members */ - clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); - if (bnx2i_init_one(hba, dev)) { - printk(KERN_ERR "bnx2i - hba %p init failed\n", hba); - bnx2i_free_hba(hba); - } else - hba->cnic = dev; -} - - -/** - * bnx2i_ulp_exit - shuts down adapter instance and frees all resources - * @dev: cnic device handle - * - */ -void bnx2i_ulp_exit(struct cnic_dev *dev) -{ - struct bnx2i_hba *hba; - - hba = bnx2i_find_hba_for_cnic(dev); - if (!hba) { - printk(KERN_INFO "bnx2i_ulp_exit: hba not " - "found, dev 0x%p\n", dev); - return; - } - write_lock(&bnx2i_dev_lock); - list_del_init(&hba->link); - adapter_count--; - - if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { - hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); - clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); - - spin_lock(&hba->lock); - bnx2i_reg_device--; - spin_unlock(&hba->lock); - } - write_unlock(&bnx2i_dev_lock); - - bnx2i_free_hba(hba); -} - - -/** - * bnx2i_mod_init - module init entry point - * - * initialize any driver wide global data structures such as endpoint pool, - * tcp port manager/queue, sysfs. 
finally driver will register itself - * with the cnic module - */ -static int __init bnx2i_mod_init(void) -{ - int err; - - printk(KERN_INFO "%s", version); - - if (!is_power_of_2(sq_size)) - sq_size = roundup_pow_of_two(sq_size); - - bnx2i_scsi_xport_template = - iscsi_register_transport(&bnx2i_iscsi_transport); - if (!bnx2i_scsi_xport_template) { - printk(KERN_ERR "Could not register bnx2i transport.\n"); - err = -ENOMEM; - goto out; - } - - err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb); - if (err) { - printk(KERN_ERR "Could not register bnx2i cnic driver.\n"); - goto unreg_xport; - } - - return 0; - -unreg_xport: - iscsi_unregister_transport(&bnx2i_iscsi_transport); -out: - return err; -} - - -/** - * bnx2i_mod_exit - module cleanup/exit entry point - * - * Global resource lock and host adapter lock is held during critical sections - * in this function. Driver will browse through the adapter list, cleans-up - * each instance, unregisters iscsi transport name and finally driver will - * unregister itself with the cnic module - */ -static void __exit bnx2i_mod_exit(void) -{ - struct bnx2i_hba *hba; - - write_lock(&bnx2i_dev_lock); - while (!list_empty(&adapter_list)) { - hba = list_entry(adapter_list.next, struct bnx2i_hba, link); - list_del(&hba->link); - adapter_count--; - - if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { - hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI); - clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic); - bnx2i_reg_device--; - } - - write_unlock(&bnx2i_dev_lock); - bnx2i_free_hba(hba); - write_lock(&bnx2i_dev_lock); - } - write_unlock(&bnx2i_dev_lock); - - iscsi_unregister_transport(&bnx2i_iscsi_transport); - cnic_unregister_driver(CNIC_ULP_ISCSI); -} - -module_init(bnx2i_mod_init); -module_exit(bnx2i_mod_exit); diff --git a/trunk/drivers/scsi/bnx2i/bnx2i_iscsi.c b/trunk/drivers/scsi/bnx2i/bnx2i_iscsi.c deleted file mode 100644 index f7412196f2f8..000000000000 --- a/trunk/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ /dev/null @@ -1,2064 +0,0 @@ -/* - * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver. - * - * Copyright (c) 2006 - 2009 Broadcom Corporation - * Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved. - * Copyright (c) 2007, 2008 Mike Christie - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. 
- * - * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) - */ - -#include -#include -#include "bnx2i.h" - -struct scsi_transport_template *bnx2i_scsi_xport_template; -struct iscsi_transport bnx2i_iscsi_transport; -static struct scsi_host_template bnx2i_host_template; - -/* - * Global endpoint resource info - */ -static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */ - - -static int bnx2i_adapter_ready(struct bnx2i_hba *hba) -{ - int retval = 0; - - if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) || - test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) || - test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state)) - retval = -EPERM; - return retval; -} - -/** - * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks - * @cmd: iscsi cmd struct pointer - * @buf_off: absolute buffer offset - * @start_bd_off: u32 pointer to return the offset within the BD - * indicated by 'start_bd_idx' on which 'buf_off' falls - * @start_bd_idx: index of the BD on which 'buf_off' falls - * - * identifies & marks various bd info for scsi command's imm data, - * unsolicited data and the first solicited data seq. - */ -static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off, - u32 *start_bd_off, u32 *start_bd_idx) -{ - struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl; - u32 cur_offset = 0; - u32 cur_bd_idx = 0; - - if (buf_off) { - while (buf_off >= (cur_offset + bd_tbl->buffer_length)) { - cur_offset += bd_tbl->buffer_length; - cur_bd_idx++; - bd_tbl++; - } - } - - *start_bd_off = buf_off - cur_offset; - *start_bd_idx = cur_bd_idx; -} - -/** - * bnx2i_setup_write_cmd_bd_info - sets up various BD information - * @task: transport layer's cmd struct pointer - * - * identifies & marks various bd info for scsi command's immediate data, - * unsolicited data and first solicited data seq which includes BD start - * index & BD buf off. This function takes into account iSCSI parameters such - * as whether immediate data and unsolicited data are supported on this connection. - */ -static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task) -{ - struct bnx2i_cmd *cmd = task->dd_data; - u32 start_bd_offset; - u32 start_bd_idx; - u32 buffer_offset = 0; - u32 cmd_len = cmd->req.total_data_transfer_length; - - /* if ImmediateData is turned off & InitialR2T is turned on, - * there will be no immediate or unsolicited data, just return.
- */ - if (!iscsi_task_has_unsol_data(task) && !task->imm_count) - return; - - /* Immediate data */ - buffer_offset += task->imm_count; - if (task->imm_count == cmd_len) - return; - - if (iscsi_task_has_unsol_data(task)) { - bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset, - &start_bd_offset, &start_bd_idx); - cmd->req.ud_buffer_offset = start_bd_offset; - cmd->req.ud_start_bd_index = start_bd_idx; - buffer_offset += task->unsol_r2t.data_length; - } - - if (buffer_offset != cmd_len) { - bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset, - &start_bd_offset, &start_bd_idx); - if ((start_bd_offset > task->conn->session->first_burst) || - (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) { - int i = 0; - - iscsi_conn_printk(KERN_ALERT, task->conn, - "bnx2i- error, buf offset 0x%x " - "bd_valid %d use_sg %d\n", - buffer_offset, cmd->io_tbl.bd_valid, - scsi_sg_count(cmd->scsi_cmd)); - for (i = 0; i < cmd->io_tbl.bd_valid; i++) - iscsi_conn_printk(KERN_ALERT, task->conn, - "bnx2i err, bd[%d]: len %x\n", - i, cmd->io_tbl.bd_tbl[i].\ - buffer_length); - } - cmd->req.sd_buffer_offset = start_bd_offset; - cmd->req.sd_start_bd_index = start_bd_idx; - } -} - - - -/** - * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table - * @hba: adapter instance - * @cmd: iscsi cmd struct pointer - * - * map SG list - */ -static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd) -{ - struct scsi_cmnd *sc = cmd->scsi_cmd; - struct iscsi_bd *bd = cmd->io_tbl.bd_tbl; - struct scatterlist *sg; - int byte_count = 0; - int bd_count = 0; - int sg_count; - int sg_len; - u64 addr; - int i; - - BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD); - - sg_count = scsi_dma_map(sc); - - scsi_for_each_sg(sc, sg, sg_count, i) { - sg_len = sg_dma_len(sg); - addr = (u64) sg_dma_address(sg); - bd[bd_count].buffer_addr_lo = addr & 0xffffffff; - bd[bd_count].buffer_addr_hi = addr >> 32; - bd[bd_count].buffer_length = sg_len; - bd[bd_count].flags = 0; - if (bd_count == 0) - bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN; - - byte_count += sg_len; - bd_count++; - } - - if (bd_count) - bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN; - - BUG_ON(byte_count != scsi_bufflen(sc)); - return bd_count; -} - -/** - * bnx2i_iscsi_map_sg_list - maps SG list - * @cmd: iscsi cmd struct pointer - * - * creates BD list table for the command - */ -static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd) -{ - int bd_count; - - bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd); - if (!bd_count) { - struct iscsi_bd *bd = cmd->io_tbl.bd_tbl; - - bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0; - bd[0].buffer_length = bd[0].flags = 0; - } - cmd->io_tbl.bd_valid = bd_count; -} - - -/** - * bnx2i_iscsi_unmap_sg_list - unmaps SG list - * @cmd: iscsi cmd struct pointer - * - * unmap IO buffers and invalidate the BD table - */ -void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd) -{ - struct scsi_cmnd *sc = cmd->scsi_cmd; - - if (cmd->io_tbl.bd_valid && sc) { - scsi_dma_unmap(sc); - cmd->io_tbl.bd_valid = 0; - } -} - -static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd) -{ - memset(&cmd->req, 0x00, sizeof(cmd->req)); - cmd->req.op_code = 0xFF; - cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma; - cmd->req.bd_list_addr_hi = - (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32); - -} - - -/** - * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid' - * @hba: pointer to adapter instance - * @conn: pointer to iscsi connection - * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1) - * - * update iscsi cid table entry 
with connection pointer. This enables - * driver to quickly get hold of connection structure pointer in - * completion/interrupt thread using iscsi context ID - */ -static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba, - struct bnx2i_conn *bnx2i_conn, - u32 iscsi_cid) -{ - if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) { - iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, - "conn bind - entry #%d not free\n", iscsi_cid); - return -EBUSY; - } - - hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn; - return 0; -} - - -/** - * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr - * @hba: pointer to adapter instance - * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1) - */ -struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba, - u16 iscsi_cid) -{ - if (!hba->cid_que.conn_cid_tbl) { - printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n"); - return NULL; - - } else if (iscsi_cid >= hba->max_active_conns) { - printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid); - return NULL; - } - return hba->cid_que.conn_cid_tbl[iscsi_cid]; -} - - -/** - * bnx2i_alloc_iscsi_cid - allocates an iscsi_cid from the free pool - * @hba: pointer to adapter instance - */ -static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba) -{ - int idx; - - if (!hba->cid_que.cid_free_cnt) - return -1; - - idx = hba->cid_que.cid_q_cons_idx; - hba->cid_que.cid_q_cons_idx++; - if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx) - hba->cid_que.cid_q_cons_idx = 0; - - hba->cid_que.cid_free_cnt--; - return hba->cid_que.cid_que[idx]; -} - - -/** - * bnx2i_free_iscsi_cid - returns an iscsi_cid to the free pool - * @hba: pointer to adapter instance - * @iscsi_cid: iscsi context ID to free - */ -static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid) -{ - int idx; - - if (iscsi_cid == (u16) -1) - return; - - hba->cid_que.cid_free_cnt++; - - idx = hba->cid_que.cid_q_prod_idx; - hba->cid_que.cid_que[idx] = iscsi_cid; - hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL; - hba->cid_que.cid_q_prod_idx++; - if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx) - hba->cid_que.cid_q_prod_idx = 0; -} - - -/** - * bnx2i_setup_free_cid_que - sets up free iscsi cid queue - * @hba: pointer to adapter instance - * - * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table, - * and initializes table attributes - */ -static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba) -{ - int mem_size; - int i; - - mem_size = hba->max_active_conns * sizeof(u32); - mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; - - hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL); - if (!hba->cid_que.cid_que_base) - return -ENOMEM; - - mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *); - mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; - hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL); - if (!hba->cid_que.conn_cid_tbl) { - kfree(hba->cid_que.cid_que_base); - hba->cid_que.cid_que_base = NULL; - return -ENOMEM; - } - - hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base; - hba->cid_que.cid_q_prod_idx = 0; - hba->cid_que.cid_q_cons_idx = 0; - hba->cid_que.cid_q_max_idx = hba->max_active_conns; - hba->cid_que.cid_free_cnt = hba->max_active_conns; - - for (i = 0; i < hba->max_active_conns; i++) { - hba->cid_que.cid_que[i] = i; - hba->cid_que.conn_cid_tbl[i] = NULL; - } - return 0; -} - - -/** - * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources - * @hba: pointer to adapter instance - */ -static void bnx2i_release_free_cid_que(struct
bnx2i_hba *hba) -{ - kfree(hba->cid_que.cid_que_base); - hba->cid_que.cid_que_base = NULL; - - kfree(hba->cid_que.conn_cid_tbl); - hba->cid_que.conn_cid_tbl = NULL; -} - - -/** - * bnx2i_alloc_ep - allocates ep structure from global pool - * @hba: pointer to adapter instance - * - * routine allocates a free endpoint structure from global pool and - * a tcp port to be used for this connection. Global resource lock, - * 'bnx2i_resc_lock' is held while accessing shared global data structures - */ -static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba) -{ - struct iscsi_endpoint *ep; - struct bnx2i_endpoint *bnx2i_ep; - - ep = iscsi_create_endpoint(sizeof(*bnx2i_ep)); - if (!ep) { - printk(KERN_ERR "bnx2i: Could not allocate ep\n"); - return NULL; - } - - bnx2i_ep = ep->dd_data; - INIT_LIST_HEAD(&bnx2i_ep->link); - bnx2i_ep->state = EP_STATE_IDLE; - bnx2i_ep->hba = hba; - bnx2i_ep->hba_age = hba->age; - hba->ofld_conns_active++; - init_waitqueue_head(&bnx2i_ep->ofld_wait); - return ep; -} - - -/** - * bnx2i_free_ep - free endpoint - * @ep: pointer to iscsi endpoint structure - */ -static void bnx2i_free_ep(struct iscsi_endpoint *ep) -{ - struct bnx2i_endpoint *bnx2i_ep = ep->dd_data; - unsigned long flags; - - spin_lock_irqsave(&bnx2i_resc_lock, flags); - bnx2i_ep->state = EP_STATE_IDLE; - bnx2i_ep->hba->ofld_conns_active--; - - bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid); - if (bnx2i_ep->conn) { - bnx2i_ep->conn->ep = NULL; - bnx2i_ep->conn = NULL; - } - - bnx2i_ep->hba = NULL; - spin_unlock_irqrestore(&bnx2i_resc_lock, flags); - iscsi_destroy_endpoint(ep); -} - - -/** - * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command - * @hba: adapter instance pointer - * @session: iscsi session pointer - * @cmd: iscsi command structure - */ -static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session, - struct bnx2i_cmd *cmd) -{ - struct io_bdt *io = &cmd->io_tbl; - struct iscsi_bd *bd; - - io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, - ISCSI_MAX_BDS_PER_CMD * sizeof(*bd), - &io->bd_tbl_dma, GFP_KERNEL); - if (!io->bd_tbl) { - iscsi_session_printk(KERN_ERR, session, "Could not " - "allocate bdt.\n"); - return -ENOMEM; - } - io->bd_valid = 0; - return 0; -} - -/** - * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table - * @hba: adapter instance pointer - * @session: iscsi session pointer - * @cmd: iscsi command structure - */ -static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba, - struct iscsi_session *session) -{ - int i; - - for (i = 0; i < session->cmds_max; i++) { - struct iscsi_task *task = session->cmds[i]; - struct bnx2i_cmd *cmd = task->dd_data; - - if (cmd->io_tbl.bd_tbl) - dma_free_coherent(&hba->pcidev->dev, - ISCSI_MAX_BDS_PER_CMD * - sizeof(struct iscsi_bd), - cmd->io_tbl.bd_tbl, - cmd->io_tbl.bd_tbl_dma); - } - -} - - -/** - * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session - * @hba: adapter instance pointer - * @session: iscsi session pointer - */ -static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba, - struct iscsi_session *session) -{ - int i; - - for (i = 0; i < session->cmds_max; i++) { - struct iscsi_task *task = session->cmds[i]; - struct bnx2i_cmd *cmd = task->dd_data; - - /* Anil */ - task->hdr = &cmd->hdr; - task->hdr_max = sizeof(struct iscsi_hdr); - - if (bnx2i_alloc_bdt(hba, session, cmd)) - goto free_bdts; - } - - return 0; - -free_bdts: - bnx2i_destroy_cmd_pool(hba, session); - return -ENOMEM; -} - - -/** - * bnx2i_setup_mp_bdt - allocate BD table 
resources - * @hba: pointer to adapter structure - * - * Allocate memory for dummy buffer and associated BD - * table to be used by middle path (MP) requests - */ -static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) -{ - int rc = 0; - struct iscsi_bd *mp_bdt; - u64 addr; - - hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, - &hba->mp_bd_dma, GFP_KERNEL); - if (!hba->mp_bd_tbl) { - printk(KERN_ERR "unable to allocate Middle Path BDT\n"); - rc = -1; - goto out; - } - - hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, - &hba->dummy_buf_dma, GFP_KERNEL); - if (!hba->dummy_buffer) { - printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, - hba->mp_bd_tbl, hba->mp_bd_dma); - hba->mp_bd_tbl = NULL; - rc = -1; - goto out; - } - - mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl; - addr = (unsigned long) hba->dummy_buf_dma; - mp_bdt->buffer_addr_lo = addr & 0xffffffff; - mp_bdt->buffer_addr_hi = addr >> 32; - mp_bdt->buffer_length = PAGE_SIZE; - mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN | - ISCSI_BD_FIRST_IN_BD_CHAIN; -out: - return rc; -} - - -/** - * bnx2i_free_mp_bdt - frees the MP dummy buffer and associated BD table - * @hba: pointer to adapter instance - * - * free MP dummy buffer and associated BD table - */ -static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) -{ - if (hba->mp_bd_tbl) { - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, - hba->mp_bd_tbl, hba->mp_bd_dma); - hba->mp_bd_tbl = NULL; - } - if (hba->dummy_buffer) { - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, - hba->dummy_buffer, hba->dummy_buf_dma); - hba->dummy_buffer = NULL; - } - return; -} - -/** - * bnx2i_drop_session - notifies iscsid of connection error. - * @cls_session: iscsi cls session pointer - * - * This notifies iscsid that there is an error, so it can initiate - * recovery. - * - * This relies on caller using the iscsi class iterator so the object - * is refcounted and does not disappear from under us.
- */ -void bnx2i_drop_session(struct iscsi_cls_session *cls_session) -{ - iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); -} - -/** - * bnx2i_ep_destroy_list_add - add an entry to EP destroy list - * @hba: pointer to adapter instance - * @ep: pointer to endpoint (transport identifier) structure - * - * EP destroy queue manager - */ -static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba, - struct bnx2i_endpoint *ep) -{ - write_lock_bh(&hba->ep_rdwr_lock); - list_add_tail(&ep->link, &hba->ep_destroy_list); - write_unlock_bh(&hba->ep_rdwr_lock); - return 0; -} - -/** - * bnx2i_ep_destroy_list_del - remove an entry from EP destroy list - * - * @hba: pointer to adapter instance - * @ep: pointer to endpoint (transport identifier) structure - * - * EP destroy queue manager - */ -static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba, - struct bnx2i_endpoint *ep) -{ - write_lock_bh(&hba->ep_rdwr_lock); - list_del_init(&ep->link); - write_unlock_bh(&hba->ep_rdwr_lock); - - return 0; -} - -/** - * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list - * @hba: pointer to adapter instance - * @ep: pointer to endpoint (transport identifier) structure - * - * pending conn offload completion queue manager - */ -static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba, - struct bnx2i_endpoint *ep) -{ - write_lock_bh(&hba->ep_rdwr_lock); - list_add_tail(&ep->link, &hba->ep_ofld_list); - write_unlock_bh(&hba->ep_rdwr_lock); - return 0; -} - -/** - * bnx2i_ep_ofld_list_del - remove an entry from ep offload pending list - * @hba: pointer to adapter instance - * @ep: pointer to endpoint (transport identifier) structure - * - * pending conn offload completion queue manager - */ -static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba, - struct bnx2i_endpoint *ep) -{ - write_lock_bh(&hba->ep_rdwr_lock); - list_del_init(&ep->link); - write_unlock_bh(&hba->ep_rdwr_lock); - return 0; -} - - -/** - * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints - * - * @hba: pointer to adapter instance - * @iscsi_cid: iscsi context ID to find - * - */ -struct bnx2i_endpoint * -bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid) -{ - struct list_head *list; - struct list_head *tmp; - struct bnx2i_endpoint *ep; - - read_lock_bh(&hba->ep_rdwr_lock); - list_for_each_safe(list, tmp, &hba->ep_ofld_list) { - ep = (struct bnx2i_endpoint *)list; - - if (ep->ep_iscsi_cid == iscsi_cid) - break; - ep = NULL; - } - read_unlock_bh(&hba->ep_rdwr_lock); - - if (!ep) - printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid); - return ep; -} - - -/** - * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list - * @hba: pointer to adapter instance - * @iscsi_cid: iscsi context ID to find - * - */ -struct bnx2i_endpoint * -bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid) -{ - struct list_head *list; - struct list_head *tmp; - struct bnx2i_endpoint *ep; - - read_lock_bh(&hba->ep_rdwr_lock); - list_for_each_safe(list, tmp, &hba->ep_destroy_list) { - ep = (struct bnx2i_endpoint *)list; - - if (ep->ep_iscsi_cid == iscsi_cid) - break; - ep = NULL; - } - read_unlock_bh(&hba->ep_rdwr_lock); - - if (!ep) - printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid); - - return ep; -} - -/** - * bnx2i_setup_host_queue_size - assigns shost->can_queue param - * @hba: pointer to adapter instance - * @shost: scsi host pointer - * - * Initializes 'can_queue' parameter based on how many outstanding commands - * the device can handle.
Each device 5708/5709/57710 has different - * capabilities - */ -static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba, - struct Scsi_Host *shost) -{ - if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type)) - shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708; - else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) - shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709; - else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) - shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710; - else - shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708; -} - - -/** - * bnx2i_alloc_hba - allocate and init adapter instance - * @cnic: cnic device pointer - * - * allocate & initialize adapter structure and call other - * support routines to do per adapter initialization - */ -struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic) -{ - struct Scsi_Host *shost; - struct bnx2i_hba *hba; - - shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0); - if (!shost) - return NULL; - shost->dma_boundary = cnic->pcidev->dma_mask; - shost->transportt = bnx2i_scsi_xport_template; - shost->max_id = ISCSI_MAX_CONNS_PER_HBA; - shost->max_channel = 0; - shost->max_lun = 512; - shost->max_cmd_len = 16; - - hba = iscsi_host_priv(shost); - hba->shost = shost; - hba->netdev = cnic->netdev; - /* Get PCI related information and update hba struct members */ - hba->pcidev = cnic->pcidev; - pci_dev_get(hba->pcidev); - hba->pci_did = hba->pcidev->device; - hba->pci_vid = hba->pcidev->vendor; - hba->pci_sdid = hba->pcidev->subsystem_device; - hba->pci_svid = hba->pcidev->subsystem_vendor; - hba->pci_func = PCI_FUNC(hba->pcidev->devfn); - hba->pci_devno = PCI_SLOT(hba->pcidev->devfn); - bnx2i_identify_device(hba); - bnx2i_setup_host_queue_size(hba, shost); - - if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) { - hba->regview = ioremap_nocache(hba->netdev->base_addr, - BNX2_MQ_CONFIG2); - if (!hba->regview) - goto ioreg_map_err; - } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { - hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096); - if (!hba->regview) - goto ioreg_map_err; - } - - if (bnx2i_setup_mp_bdt(hba)) - goto mp_bdt_mem_err; - - INIT_LIST_HEAD(&hba->ep_ofld_list); - INIT_LIST_HEAD(&hba->ep_destroy_list); - rwlock_init(&hba->ep_rdwr_lock); - - hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED; - - /* different values for 5708/5709/57710 */ - hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA; - - if (bnx2i_setup_free_cid_que(hba)) - goto cid_que_err; - - /* SQ/RQ/CQ size can be changed via sysfs interface */ - if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { - if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX) - hba->max_sqes = sq_size; - else - hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT; - } else { /* 5706/5708/5709 */ - if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX) - hba->max_sqes = sq_size; - else - hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT; - } - - hba->max_rqes = rq_size; - hba->max_cqes = hba->max_sqes + rq_size; - if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) { - if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX) - hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX; - } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX) - hba->max_cqes = BNX2I_570X_CQ_WQES_MAX; - - hba->num_ccell = hba->max_sqes / 2; - - spin_lock_init(&hba->lock); - mutex_init(&hba->net_dev_lock); - - if (iscsi_host_add(shost, &hba->pcidev->dev)) - goto free_dump_mem; - return hba; - -free_dump_mem: - bnx2i_release_free_cid_que(hba); -cid_que_err: - bnx2i_free_mp_bdt(hba);
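 /* The error labels in this function follow the usual stacked-unwind
  * idiom: each setup step that fails jumps to the label that releases
  * only the resources acquired before it, in reverse order of
  * allocation. A minimal sketch of the same pattern, using hypothetical
  * alloc_a()/alloc_b() helpers that are not part of this driver:
  *
  *	if (alloc_a(hba))
  *		goto err_a;
  *	if (alloc_b(hba))
  *		goto err_b;
  *	return hba;
  * err_b:
  *	free_a(hba);
  * err_a:
  *	return NULL;
  */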
-mp_bdt_mem_err: - if (hba->regview) { - iounmap(hba->regview); - hba->regview = NULL; - } -ioreg_map_err: - pci_dev_put(hba->pcidev); - scsi_host_put(shost); - return NULL; -} - -/** - * bnx2i_free_hba - releases hba structure and resources held by the adapter - * @hba: pointer to adapter instance - * - * free adapter structure and call various cleanup routines. - */ -void bnx2i_free_hba(struct bnx2i_hba *hba) -{ - struct Scsi_Host *shost = hba->shost; - - iscsi_host_remove(shost); - INIT_LIST_HEAD(&hba->ep_ofld_list); - INIT_LIST_HEAD(&hba->ep_destroy_list); - pci_dev_put(hba->pcidev); - - if (hba->regview) { - iounmap(hba->regview); - hba->regview = NULL; - } - bnx2i_free_mp_bdt(hba); - bnx2i_release_free_cid_que(hba); - iscsi_host_free(shost); -} - -/** - * bnx2i_conn_free_login_resources - free DMA resources used for login process - * @hba: pointer to adapter instance - * @bnx2i_conn: iscsi connection pointer - * - * Login related resources, mostly BDT & payload DMA memory is freed - */ -static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba, - struct bnx2i_conn *bnx2i_conn) -{ - if (bnx2i_conn->gen_pdu.resp_bd_tbl) { - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, - bnx2i_conn->gen_pdu.resp_bd_tbl, - bnx2i_conn->gen_pdu.resp_bd_dma); - bnx2i_conn->gen_pdu.resp_bd_tbl = NULL; - } - - if (bnx2i_conn->gen_pdu.req_bd_tbl) { - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, - bnx2i_conn->gen_pdu.req_bd_tbl, - bnx2i_conn->gen_pdu.req_bd_dma); - bnx2i_conn->gen_pdu.req_bd_tbl = NULL; - } - - if (bnx2i_conn->gen_pdu.resp_buf) { - dma_free_coherent(&hba->pcidev->dev, - ISCSI_DEF_MAX_RECV_SEG_LEN, - bnx2i_conn->gen_pdu.resp_buf, - bnx2i_conn->gen_pdu.resp_dma_addr); - bnx2i_conn->gen_pdu.resp_buf = NULL; - } - - if (bnx2i_conn->gen_pdu.req_buf) { - dma_free_coherent(&hba->pcidev->dev, - ISCSI_DEF_MAX_RECV_SEG_LEN, - bnx2i_conn->gen_pdu.req_buf, - bnx2i_conn->gen_pdu.req_dma_addr); - bnx2i_conn->gen_pdu.req_buf = NULL; - } -} - -/** - * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop. - * @hba: pointer to adapter instance - * @bnx2i_conn: iscsi connection pointer - * - * Mgmt task DMA resources are allocated in this routine.
- */ -static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, - struct bnx2i_conn *bnx2i_conn) -{ - /* Allocate memory for login request/response buffers */ - bnx2i_conn->gen_pdu.req_buf = - dma_alloc_coherent(&hba->pcidev->dev, - ISCSI_DEF_MAX_RECV_SEG_LEN, - &bnx2i_conn->gen_pdu.req_dma_addr, - GFP_KERNEL); - if (bnx2i_conn->gen_pdu.req_buf == NULL) - goto login_req_buf_failure; - - bnx2i_conn->gen_pdu.req_buf_size = 0; - bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf; - - bnx2i_conn->gen_pdu.resp_buf = - dma_alloc_coherent(&hba->pcidev->dev, - ISCSI_DEF_MAX_RECV_SEG_LEN, - &bnx2i_conn->gen_pdu.resp_dma_addr, - GFP_KERNEL); - if (bnx2i_conn->gen_pdu.resp_buf == NULL) - goto login_resp_buf_failure; - - bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN; - bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf; - - bnx2i_conn->gen_pdu.req_bd_tbl = - dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, - &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL); - if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL) - goto login_req_bd_tbl_failure; - - bnx2i_conn->gen_pdu.resp_bd_tbl = - dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, - &bnx2i_conn->gen_pdu.resp_bd_dma, - GFP_KERNEL); - if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL) - goto login_resp_bd_tbl_failure; - - return 0; - -login_resp_bd_tbl_failure: - dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, - bnx2i_conn->gen_pdu.req_bd_tbl, - bnx2i_conn->gen_pdu.req_bd_dma); - bnx2i_conn->gen_pdu.req_bd_tbl = NULL; - -login_req_bd_tbl_failure: - dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, - bnx2i_conn->gen_pdu.resp_buf, - bnx2i_conn->gen_pdu.resp_dma_addr); - bnx2i_conn->gen_pdu.resp_buf = NULL; -login_resp_buf_failure: - dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN, - bnx2i_conn->gen_pdu.req_buf, - bnx2i_conn->gen_pdu.req_dma_addr); - bnx2i_conn->gen_pdu.req_buf = NULL; -login_req_buf_failure: - iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data, - "login resource alloc failed!!\n"); - return -ENOMEM; - -} - - -/** - * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table. - * @bnx2i_conn: iscsi connection pointer - * - * Allocates buffers and BD tables before shipping requests to cnic - * for PDUs prepared by 'iscsid' daemon - */ -static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn) -{ - struct iscsi_bd *bd_tbl; - - bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl; - - bd_tbl->buffer_addr_hi = - (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32); - bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr; - bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr - - bnx2i_conn->gen_pdu.req_buf; - bd_tbl->reserved0 = 0; - bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN | - ISCSI_BD_FIRST_IN_BD_CHAIN; - - bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.resp_bd_tbl; - bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32; - bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr; - bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN; - bd_tbl->reserved0 = 0; - bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN | - ISCSI_BD_FIRST_IN_BD_CHAIN; -} - - -/** - * bnx2i_iscsi_send_generic_request - called to send mgmt tasks. - * @task: transport layer task pointer - * - * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login, - * Nop-out and Logout requests flow through this path. 
- */ -static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task) -{ - struct bnx2i_cmd *cmd = task->dd_data; - struct bnx2i_conn *bnx2i_conn = cmd->conn; - int rc = 0; - char *buf; - int data_len; - - bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn); - switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { - case ISCSI_OP_LOGIN: - bnx2i_send_iscsi_login(bnx2i_conn, task); - break; - case ISCSI_OP_NOOP_OUT: - data_len = bnx2i_conn->gen_pdu.req_buf_size; - buf = bnx2i_conn->gen_pdu.req_buf; - if (data_len) - rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, - RESERVED_ITT, - buf, data_len, 1); - else - rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task, - RESERVED_ITT, - NULL, 0, 1); - break; - case ISCSI_OP_LOGOUT: - rc = bnx2i_send_iscsi_logout(bnx2i_conn, task); - break; - case ISCSI_OP_SCSI_TMFUNC: - rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task); - break; - default: - iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data, - "send_gen: unsupported op 0x%x\n", - task->hdr->opcode); - } - return rc; -} - - -/********************************************************************** - * SCSI-ML Interface - **********************************************************************/ - -/** - * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe - * @sc: SCSI-ML command pointer - * @cmd: iscsi cmd pointer - */ -static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd) -{ - u32 dword; - int lpcnt; - u8 *srcp; - u32 *dstp; - u32 scsi_lun[2]; - - int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun); - cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]); - cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]); - - lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword); - srcp = (u8 *) sc->cmnd; - dstp = (u32 *) cmd->req.cdb; - while (lpcnt--) { - memcpy(&dword, (const void *) srcp, 4); - *dstp = cpu_to_be32(dword); - srcp += 4; - dstp++; - } - if (sc->cmd_len & 0x3) { - dword = (u32) srcp[0] | ((u32) srcp[1] << 8); - *dstp = cpu_to_be32(dword); - } -} - -static void bnx2i_cleanup_task(struct iscsi_task *task) -{ - struct iscsi_conn *conn = task->conn; - struct bnx2i_conn *bnx2i_conn = conn->dd_data; - struct bnx2i_hba *hba = bnx2i_conn->hba; - - /* - * mgmt task or cmd was never sent to us to transmit. 
- */ - if (!task->sc || task->state == ISCSI_TASK_PENDING) - return; - /* - * need to clean-up task context to claim dma buffers - */ - if (task->state == ISCSI_TASK_ABRT_TMF) { - bnx2i_send_cmd_cleanup_req(hba, task->dd_data); - - spin_unlock_bh(&conn->session->lock); - wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl, - msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT)); - spin_lock_bh(&conn->session->lock); - } - bnx2i_iscsi_unmap_sg_list(task->dd_data); -} - -/** - * bnx2i_mtask_xmit - transmit mtask to chip for further processing - * @conn: transport layer conn structure pointer - * @task: transport layer command structure pointer - */ -static int -bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) -{ - struct bnx2i_conn *bnx2i_conn = conn->dd_data; - struct bnx2i_cmd *cmd = task->dd_data; - - memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN); - - bnx2i_setup_cmd_wqe_template(cmd); - bnx2i_conn->gen_pdu.req_buf_size = task->data_count; - if (task->data_count) { - memcpy(bnx2i_conn->gen_pdu.req_buf, task->data, - task->data_count); - bnx2i_conn->gen_pdu.req_wr_ptr = - bnx2i_conn->gen_pdu.req_buf + task->data_count; - } - cmd->conn = conn->dd_data; - cmd->scsi_cmd = NULL; - return bnx2i_iscsi_send_generic_request(task); -} - -/** - * bnx2i_task_xmit - transmit iscsi command to chip for further processing - * @task: transport layer command structure pointer - * - * maps SG buffers and send request to chip/firmware in the form of SQ WQE - */ -static int bnx2i_task_xmit(struct iscsi_task *task) -{ - struct iscsi_conn *conn = task->conn; - struct iscsi_session *session = conn->session; - struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session); - struct bnx2i_hba *hba = iscsi_host_priv(shost); - struct bnx2i_conn *bnx2i_conn = conn->dd_data; - struct scsi_cmnd *sc = task->sc; - struct bnx2i_cmd *cmd = task->dd_data; - struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr; - - if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state)) - return -ENOTCONN; - - if (!bnx2i_conn->is_bound) - return -ENOTCONN; - - /* - * If there is no scsi_cmnd this must be a mgmt task - */ - if (!sc) - return bnx2i_mtask_xmit(conn, task); - - bnx2i_setup_cmd_wqe_template(cmd); - cmd->req.op_code = ISCSI_OP_SCSI_CMD; - cmd->conn = bnx2i_conn; - cmd->scsi_cmd = sc; - cmd->req.total_data_transfer_length = scsi_bufflen(sc); - cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn); - - bnx2i_iscsi_map_sg_list(cmd); - bnx2i_cpy_scsi_cdb(sc, cmd); - - cmd->req.op_attr = ISCSI_ATTR_SIMPLE; - if (sc->sc_data_direction == DMA_TO_DEVICE) { - cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE; - cmd->req.itt = task->itt | - (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT); - bnx2i_setup_write_cmd_bd_info(task); - } else { - if (scsi_bufflen(sc)) - cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ; - cmd->req.itt = task->itt | - (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT); - } - - cmd->req.num_bds = cmd->io_tbl.bd_valid; - if (!cmd->io_tbl.bd_valid) { - cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma; - cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32); - cmd->req.num_bds = 1; - } - - bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd); - return 0; -} - -/** - * bnx2i_session_create - create a new iscsi session - * @cmds_max: max commands supported - * @qdepth: scsi queue depth to support - * @initial_cmdsn: initial iscsi CMDSN to be used for this session - * - * Creates a new iSCSI session instance on given device. 
- */ -static struct iscsi_cls_session * -bnx2i_session_create(struct iscsi_endpoint *ep, - uint16_t cmds_max, uint16_t qdepth, - uint32_t initial_cmdsn) -{ - struct Scsi_Host *shost; - struct iscsi_cls_session *cls_session; - struct bnx2i_hba *hba; - struct bnx2i_endpoint *bnx2i_ep; - - if (!ep) { - printk(KERN_ERR "bnx2i: missing ep.\n"); - return NULL; - } - - bnx2i_ep = ep->dd_data; - shost = bnx2i_ep->hba->shost; - hba = iscsi_host_priv(shost); - if (bnx2i_adapter_ready(hba)) - return NULL; - - /* - * user can override hw limit as long as it is within - * the min/max. - */ - if (cmds_max > hba->max_sqes) - cmds_max = hba->max_sqes; - else if (cmds_max < BNX2I_SQ_WQES_MIN) - cmds_max = BNX2I_SQ_WQES_MIN; - - cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost, - cmds_max, sizeof(struct bnx2i_cmd), - initial_cmdsn, ISCSI_MAX_TARGET); - if (!cls_session) - return NULL; - - if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data)) - goto session_teardown; - return cls_session; - -session_teardown: - iscsi_session_teardown(cls_session); - return NULL; -} - - -/** - * bnx2i_session_destroy - destroys iscsi session - * @cls_session: pointer to iscsi cls session - * - * Destroys previously created iSCSI session instance and releases - * all resources held by it - */ -static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session) -{ - struct iscsi_session *session = cls_session->dd_data; - struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); - struct bnx2i_hba *hba = iscsi_host_priv(shost); - - bnx2i_destroy_cmd_pool(hba, session); - iscsi_session_teardown(cls_session); -} - - -/** - * bnx2i_conn_create - create iscsi connection instance - * @cls_session: pointer to iscsi cls session - * @cid: iscsi cid as per rfc (not NX2's CID terminology) - * - * Creates a new iSCSI connection instance for a given session - */ -static struct iscsi_cls_conn * -bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid) -{ - struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); - struct bnx2i_hba *hba = iscsi_host_priv(shost); - struct bnx2i_conn *bnx2i_conn; - struct iscsi_cls_conn *cls_conn; - struct iscsi_conn *conn; - - cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn), - cid); - if (!cls_conn) - return NULL; - conn = cls_conn->dd_data; - - bnx2i_conn = conn->dd_data; - bnx2i_conn->cls_conn = cls_conn; - bnx2i_conn->hba = hba; - /* 'ep' ptr will be assigned in bind() call */ - bnx2i_conn->ep = NULL; - init_completion(&bnx2i_conn->cmd_cleanup_cmpl); - - if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) { - iscsi_conn_printk(KERN_ALERT, conn, - "conn_new: login resc alloc failed!!\n"); - goto free_conn; - } - - return cls_conn; - -free_conn: - iscsi_conn_teardown(cls_conn); - return NULL; -} - -/** - * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together - * @cls_session: pointer to iscsi cls session - * @cls_conn: pointer to iscsi cls conn - * @transport_fd: 64-bit EP handle - * @is_leading: leading connection on this session? - * - * Binds together iSCSI session instance, iSCSI connection instance - * and the TCP connection. 
This routine returns an error code if the - * TCP connection does not belong to the device the iSCSI sess/conn - * is bound to - */ -static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session, - struct iscsi_cls_conn *cls_conn, - uint64_t transport_fd, int is_leading) -{ - struct iscsi_conn *conn = cls_conn->dd_data; - struct bnx2i_conn *bnx2i_conn = conn->dd_data; - struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); - struct bnx2i_hba *hba = iscsi_host_priv(shost); - struct bnx2i_endpoint *bnx2i_ep; - struct iscsi_endpoint *ep; - int ret_code; - - ep = iscsi_lookup_endpoint(transport_fd); - if (!ep) - return -EINVAL; - - bnx2i_ep = ep->dd_data; - if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) || - (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) - /* Peer disconnect via FIN or RST */ - return -EINVAL; - - if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) - return -EINVAL; - - if (bnx2i_ep->hba != hba) { - /* Error - TCP connection does not belong to this device - */ - iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data, - "conn bind, ep=0x%p (%s) does not", - bnx2i_ep, bnx2i_ep->hba->netdev->name); - iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data, - "belong to hba (%s)\n", - hba->netdev->name); - return -EEXIST; - } - - bnx2i_ep->conn = bnx2i_conn; - bnx2i_conn->ep = bnx2i_ep; - bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid; - bnx2i_conn->fw_cid = bnx2i_ep->ep_cid; - bnx2i_conn->is_bound = 1; - - ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn, - bnx2i_ep->ep_iscsi_cid); - - /* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710 - * driver needs to explicitly replenish RQ index during setup. - */ - if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type)) - bnx2i_put_rq_buf(bnx2i_conn, 0); - - bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE); - return ret_code; -} - - -/** - * bnx2i_conn_destroy - destroy iscsi connection instance & release resources - * @cls_conn: pointer to iscsi cls conn - * - * Destroy an iSCSI connection instance and release memory resources held by - * this connection - */ -static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn) -{ - struct iscsi_conn *conn = cls_conn->dd_data; - struct bnx2i_conn *bnx2i_conn = conn->dd_data; - struct Scsi_Host *shost; - struct bnx2i_hba *hba; - - shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn)); - hba = iscsi_host_priv(shost); - - bnx2i_conn_free_login_resources(hba, bnx2i_conn); - iscsi_conn_teardown(cls_conn); -} - - -/** - * bnx2i_conn_get_param - return iscsi connection parameter to caller - * @cls_conn: pointer to iscsi cls conn - * @param: parameter type identifier - * @buf: buffer pointer - * - * returns iSCSI connection parameters - */ -static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn, - enum iscsi_param param, char *buf) -{ - struct iscsi_conn *conn = cls_conn->dd_data; - struct bnx2i_conn *bnx2i_conn = conn->dd_data; - int len = 0; - - switch (param) { - case ISCSI_PARAM_CONN_PORT: - if (bnx2i_conn->ep) - len = sprintf(buf, "%hu\n", - bnx2i_conn->ep->cm_sk->dst_port); - break; - case ISCSI_PARAM_CONN_ADDRESS: - if (bnx2i_conn->ep) - len = sprintf(buf, NIPQUAD_FMT "\n", - NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip)); - break; - default: - return iscsi_conn_get_param(cls_conn, param, buf); - } - - return len; -} - -/** - * bnx2i_host_get_param - returns host (adapter) related parameters - * @shost: scsi host pointer - * @param: parameter type identifier - * @buf: buffer pointer - */ -static int bnx2i_host_get_param(struct Scsi_Host *shost, -
enum iscsi_host_param param, char *buf) -{ - struct bnx2i_hba *hba = iscsi_host_priv(shost); - int len = 0; - - switch (param) { - case ISCSI_HOST_PARAM_HWADDRESS: - len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6); - break; - case ISCSI_HOST_PARAM_NETDEV_NAME: - len = sprintf(buf, "%s\n", hba->netdev->name); - break; - default: - return iscsi_host_get_param(shost, param, buf); - } - return len; -} - -/** - * bnx2i_conn_start - completes iscsi connection migration to FFP - * @cls_conn: pointer to iscsi cls conn - * - * last call in FFP migration to handover iscsi conn to the driver - */ -static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn) -{ - struct iscsi_conn *conn = cls_conn->dd_data; - struct bnx2i_conn *bnx2i_conn = conn->dd_data; - - bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START; - bnx2i_update_iscsi_conn(conn); - - /* - * this should normally not sleep for a long time so it should - * not disrupt the caller. - */ - bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies; - bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer; - bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep; - add_timer(&bnx2i_conn->ep->ofld_timer); - /* update iSCSI context for this conn, wait for CNIC to complete */ - wait_event_interruptible(bnx2i_conn->ep->ofld_wait, - bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START); - - if (signal_pending(current)) - flush_signals(current); - del_timer_sync(&bnx2i_conn->ep->ofld_timer); - - iscsi_conn_start(cls_conn); - return 0; -} - - -/** - * bnx2i_conn_get_stats - returns iSCSI stats - * @cls_conn: pointer to iscsi cls conn - * @stats: pointer to iscsi statistic struct - */ -static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn, - struct iscsi_stats *stats) -{ - struct iscsi_conn *conn = cls_conn->dd_data; - - stats->txdata_octets = conn->txdata_octets; - stats->rxdata_octets = conn->rxdata_octets; - stats->scsicmd_pdus = conn->scsicmd_pdus_cnt; - stats->dataout_pdus = conn->dataout_pdus_cnt; - stats->scsirsp_pdus = conn->scsirsp_pdus_cnt; - stats->datain_pdus = conn->datain_pdus_cnt; - stats->r2t_pdus = conn->r2t_pdus_cnt; - stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt; - stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt; - stats->custom_length = 3; - strcpy(stats->custom[2].desc, "eh_abort_cnt"); - stats->custom[2].value = conn->eh_abort_cnt; - stats->digest_err = 0; - stats->timeout_err = 0; - stats->custom_length = 0; -} - - -/** - * bnx2i_check_route - checks if target IP route belongs to one of NX2 devices - * @dst_addr: target IP address - * - * check if route resolves to BNX2 device - */ -static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr) -{ - struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr; - struct bnx2i_hba *hba; - struct cnic_dev *cnic = NULL; - - bnx2i_reg_dev_all(); - - hba = get_adapter_list_head(); - if (hba && hba->cnic) - cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI); - if (!cnic) { - printk(KERN_ALERT "bnx2i: no route," - "can't connect using cnic\n"); - goto no_nx2_route; - } - hba = bnx2i_find_hba_for_cnic(cnic); - if (!hba) - goto no_nx2_route; - - if (bnx2i_adapter_ready(hba)) { - printk(KERN_ALERT "bnx2i: check route, hba not found\n"); - goto no_nx2_route; - } - if (hba->netdev->mtu > hba->mtu_supported) { - printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n", - hba->netdev->name, hba->netdev->mtu); - printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n", - hba->mtu_supported); - goto no_nx2_route; - } - return hba; -no_nx2_route: - return NULL; -} - - 
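/* A hypothetical caller-side sketch, illustration only and not part of
 * the original driver: one way the IPv4 destination consumed by
 * bnx2i_check_route() and bnx2i_ep_connect() could be built. The portal
 * address and the helper name below are placeholders; 3260 is the
 * standard iSCSI portal port.
 */
static struct bnx2i_hba *bnx2i_route_sketch(void)
{
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(3260),		/* iSCSI well-known port */
	};

	dst.sin_addr.s_addr = in_aton("10.0.0.1");	/* placeholder portal IP */
	return bnx2i_check_route((struct sockaddr *)&dst);
}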
-/** - * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources - * @hba: pointer to adapter instance - * @ep: endpoint (transport identifier) structure - * - * destroys cm_sock structure and on chip iscsi context - */ -static int bnx2i_tear_down_conn(struct bnx2i_hba *hba, - struct bnx2i_endpoint *ep) -{ - if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) - hba->cnic->cm_destroy(ep->cm_sk); - - if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state)) - ep->state = EP_STATE_DISCONN_COMPL; - - if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) && - ep->state == EP_STATE_DISCONN_TIMEDOUT) { - printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump," - " NW/PCIe trace, driver msgs to developers" - " for analysis\n"); - return 1; - } - - ep->state = EP_STATE_CLEANUP_START; - init_timer(&ep->ofld_timer); - ep->ofld_timer.expires = 10*HZ + jiffies; - ep->ofld_timer.function = bnx2i_ep_ofld_timer; - ep->ofld_timer.data = (unsigned long) ep; - add_timer(&ep->ofld_timer); - - bnx2i_ep_destroy_list_add(hba, ep); - - /* destroy iSCSI context, wait for it to complete */ - bnx2i_send_conn_destroy(hba, ep); - wait_event_interruptible(ep->ofld_wait, - (ep->state != EP_STATE_CLEANUP_START)); - - if (signal_pending(current)) - flush_signals(current); - del_timer_sync(&ep->ofld_timer); - - bnx2i_ep_destroy_list_del(hba, ep); - - if (ep->state != EP_STATE_CLEANUP_CMPL) - /* should never happen */ - printk(KERN_ALERT "bnx2i - conn destroy failed\n"); - - return 0; -} - - -/** - * bnx2i_ep_connect - establish TCP connection to target portal - * @shost: scsi host - * @dst_addr: target IP address - * @non_blocking: blocking or non-blocking call - * - * this routine initiates the TCP/IP connection by invoking Option-2 i/f - * with l5_core and the CNIC.
This is a multi-step process of resolving - * route to target, create a iscsi connection context, handshaking with - * CNIC module to create/initialize the socket struct and finally - * sending down option-2 request to complete TCP 3-way handshake - */ -static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, - struct sockaddr *dst_addr, - int non_blocking) -{ - u32 iscsi_cid = BNX2I_CID_RESERVED; - struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr; - struct sockaddr_in6 *desti6; - struct bnx2i_endpoint *bnx2i_ep; - struct bnx2i_hba *hba; - struct cnic_dev *cnic; - struct cnic_sockaddr saddr; - struct iscsi_endpoint *ep; - int rc = 0; - - if (shost) - /* driver is given scsi host to work with */ - hba = iscsi_host_priv(shost); - else - /* - * check if the given destination can be reached through - * a iscsi capable NetXtreme2 device - */ - hba = bnx2i_check_route(dst_addr); - if (!hba) { - rc = -ENOMEM; - goto check_busy; - } - - cnic = hba->cnic; - ep = bnx2i_alloc_ep(hba); - if (!ep) { - rc = -ENOMEM; - goto check_busy; - } - bnx2i_ep = ep->dd_data; - - mutex_lock(&hba->net_dev_lock); - if (bnx2i_adapter_ready(hba)) { - rc = -EPERM; - goto net_if_down; - } - - bnx2i_ep->state = EP_STATE_IDLE; - bnx2i_ep->ep_iscsi_cid = (u16) -1; - bnx2i_ep->num_active_cmds = 0; - iscsi_cid = bnx2i_alloc_iscsi_cid(hba); - if (iscsi_cid == -1) { - printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n"); - rc = -ENOMEM; - goto iscsi_cid_err; - } - bnx2i_ep->hba_age = hba->age; - - rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep); - if (rc != 0) { - printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n"); - rc = -ENOMEM; - goto qp_resc_err; - } - - bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid; - bnx2i_ep->state = EP_STATE_OFLD_START; - bnx2i_ep_ofld_list_add(hba, bnx2i_ep); - - init_timer(&bnx2i_ep->ofld_timer); - bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies; - bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer; - bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; - add_timer(&bnx2i_ep->ofld_timer); - - bnx2i_send_conn_ofld_req(hba, bnx2i_ep); - - /* Wait for CNIC hardware to setup conn context and return 'cid' */ - wait_event_interruptible(bnx2i_ep->ofld_wait, - bnx2i_ep->state != EP_STATE_OFLD_START); - - if (signal_pending(current)) - flush_signals(current); - del_timer_sync(&bnx2i_ep->ofld_timer); - - bnx2i_ep_ofld_list_del(hba, bnx2i_ep); - - if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) { - rc = -ENOSPC; - goto conn_failed; - } - - rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid, - iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep); - if (rc) { - rc = -EINVAL; - goto conn_failed; - } - - bnx2i_ep->cm_sk->rcv_buf = 256 * 1024; - bnx2i_ep->cm_sk->snd_buf = 256 * 1024; - clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags); - - memset(&saddr, 0, sizeof(saddr)); - if (dst_addr->sa_family == AF_INET) { - desti = (struct sockaddr_in *) dst_addr; - saddr.remote.v4 = *desti; - saddr.local.v4.sin_family = desti->sin_family; - } else if (dst_addr->sa_family == AF_INET6) { - desti6 = (struct sockaddr_in6 *) dst_addr; - saddr.remote.v6 = *desti6; - saddr.local.v6.sin6_family = desti6->sin6_family; - } - - bnx2i_ep->timestamp = jiffies; - bnx2i_ep->state = EP_STATE_CONNECT_START; - if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { - rc = -EINVAL; - goto conn_failed; - } else - rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr); - - if (rc) - goto release_ep; - - if (bnx2i_map_ep_dbell_regs(bnx2i_ep)) - goto release_ep; - mutex_unlock(&hba->net_dev_lock); - return ep; - 
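/*
 * Happy-path state progression for the offload/connect sequence above,
 * using the EP_STATE_* names from this driver:
 *
 *	EP_STATE_IDLE -> EP_STATE_OFLD_START    (bnx2i_send_conn_ofld_req)
 *	              -> EP_STATE_OFLD_COMPL    (CNIC finished context setup)
 *	              -> EP_STATE_CONNECT_START (cnic->cm_connect issued)
 *	              -> EP_STATE_CONNECT_COMPL (3-way handshake done, seen
 *	                                         by bnx2i_ep_poll below)
 *
 * The error labels that follow unwind the allocations in reverse order.
 */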
-release_ep: - if (bnx2i_tear_down_conn(hba, bnx2i_ep)) { - mutex_unlock(&hba->net_dev_lock); - return ERR_PTR(rc); - } -conn_failed: -net_if_down: -iscsi_cid_err: - bnx2i_free_qp_resc(hba, bnx2i_ep); -qp_resc_err: - bnx2i_free_ep(ep); - mutex_unlock(&hba->net_dev_lock); -check_busy: - bnx2i_unreg_dev_all(); - return ERR_PTR(rc); -} - - -/** - * bnx2i_ep_poll - polls for TCP connection establishment - * @ep: TCP connection (endpoint) handle - * @timeout_ms: timeout value in milliseconds - * - * polls for TCP connect request to complete - */ -static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) -{ - struct bnx2i_endpoint *bnx2i_ep; - int rc = 0; - - bnx2i_ep = ep->dd_data; - if ((bnx2i_ep->state == EP_STATE_IDLE) || - (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) || - (bnx2i_ep->state == EP_STATE_OFLD_FAILED)) - return -1; - if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL) - return 1; - - rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait, - ((bnx2i_ep->state == - EP_STATE_OFLD_FAILED) || - (bnx2i_ep->state == - EP_STATE_CONNECT_FAILED) || - (bnx2i_ep->state == - EP_STATE_CONNECT_COMPL)), - msecs_to_jiffies(timeout_ms)); - if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED)) - rc = -1; - - if (rc > 0) - return 1; - else if (!rc) - return 0; /* timeout */ - else - return rc; -} - - -/** - * bnx2i_ep_tcp_conn_active - check EP state transition - * @ep: endpoint pointer - * - * check if underlying TCP connection is active - */ -static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep) -{ - int ret; - int cnic_dev_10g = 0; - - if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type)) - cnic_dev_10g = 1; - - switch (bnx2i_ep->state) { - case EP_STATE_CONNECT_START: - case EP_STATE_CLEANUP_FAILED: - case EP_STATE_OFLD_FAILED: - case EP_STATE_DISCONN_TIMEDOUT: - ret = 0; - break; - case EP_STATE_CONNECT_COMPL: - case EP_STATE_ULP_UPDATE_START: - case EP_STATE_ULP_UPDATE_COMPL: - case EP_STATE_TCP_FIN_RCVD: - case EP_STATE_ULP_UPDATE_FAILED: - ret = 1; - break; - case EP_STATE_TCP_RST_RCVD: - ret = 0; - break; - case EP_STATE_CONNECT_FAILED: - if (cnic_dev_10g) - ret = 1; - else - ret = 0; - break; - default: - ret = 0; - } - - return ret; -} - - -/** - * bnx2i_ep_disconnect - executes TCP connection teardown process - * @ep: TCP connection (endpoint) handle - * - * executes TCP connection teardown process - */ -static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep) -{ - struct bnx2i_endpoint *bnx2i_ep; - struct bnx2i_conn *bnx2i_conn = NULL; - struct iscsi_session *session = NULL; - struct iscsi_conn *conn; - struct cnic_dev *cnic; - struct bnx2i_hba *hba; - - bnx2i_ep = ep->dd_data; - - /* driver should not attempt connection cleanup until TCP_CONNECT - * completes either successfully or fails.
Timeout is 9-secs, so - * wait for it to complete - */ - while ((bnx2i_ep->state == EP_STATE_CONNECT_START) && - !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ))) - msleep(250); - - if (bnx2i_ep->conn) { - bnx2i_conn = bnx2i_ep->conn; - conn = bnx2i_conn->cls_conn->dd_data; - session = conn->session; - - spin_lock_bh(&session->lock); - bnx2i_conn->is_bound = 0; - spin_unlock_bh(&session->lock); - } - - hba = bnx2i_ep->hba; - if (bnx2i_ep->state == EP_STATE_IDLE) - goto return_bnx2i_ep; - cnic = hba->cnic; - - mutex_lock(&hba->net_dev_lock); - - if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) - goto free_resc; - if (bnx2i_ep->hba_age != hba->age) - goto free_resc; - - if (!bnx2i_ep_tcp_conn_active(bnx2i_ep)) - goto destroy_conn; - - bnx2i_ep->state = EP_STATE_DISCONN_START; - - init_timer(&bnx2i_ep->ofld_timer); - bnx2i_ep->ofld_timer.expires = 10*HZ + jiffies; - bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer; - bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; - add_timer(&bnx2i_ep->ofld_timer); - - if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) { - int close = 0; - - if (session) { - spin_lock_bh(&session->lock); - if (session->state == ISCSI_STATE_LOGGING_OUT) - close = 1; - spin_unlock_bh(&session->lock); - } - if (close) - cnic->cm_close(bnx2i_ep->cm_sk); - else - cnic->cm_abort(bnx2i_ep->cm_sk); - } else - goto free_resc; - - /* wait for option-2 conn teardown */ - wait_event_interruptible(bnx2i_ep->ofld_wait, - bnx2i_ep->state != EP_STATE_DISCONN_START); - - if (signal_pending(current)) - flush_signals(current); - del_timer_sync(&bnx2i_ep->ofld_timer); - -destroy_conn: - if (bnx2i_tear_down_conn(hba, bnx2i_ep)) { - mutex_unlock(&hba->net_dev_lock); - return; - } -free_resc: - mutex_unlock(&hba->net_dev_lock); - bnx2i_free_qp_resc(hba, bnx2i_ep); -return_bnx2i_ep: - if (bnx2i_conn) - bnx2i_conn->ep = NULL; - - bnx2i_free_ep(ep); - - if (!hba->ofld_conns_active) - bnx2i_unreg_dev_all(); -} - - -/** - * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler - * @shost: scsi host pointer - * @buf: pointer to buffer containing iscsi path message - * - */ -static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params) -{ - struct bnx2i_hba *hba = iscsi_host_priv(shost); - char *buf = (char *) params; - u16 len = sizeof(*params); - - /* handled by cnic driver */ - hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf, - len); - - return 0; -} - - -/* - * 'Scsi_Host_Template' structure and 'iscsi_transport' structure template - * used while registering with the scsi host and iSCSI transport module.
- */ -static struct scsi_host_template bnx2i_host_template = { - .module = THIS_MODULE, - .name = "Broadcom Offload iSCSI Initiator", - .proc_name = "bnx2i", - .queuecommand = iscsi_queuecommand, - .eh_abort_handler = iscsi_eh_abort, - .eh_device_reset_handler = iscsi_eh_device_reset, - .eh_target_reset_handler = iscsi_eh_target_reset, - .can_queue = 1024, - .max_sectors = 127, - .cmd_per_lun = 32, - .this_id = -1, - .use_clustering = ENABLE_CLUSTERING, - .sg_tablesize = ISCSI_MAX_BDS_PER_CMD, - .shost_attrs = bnx2i_dev_attributes, -}; - -struct iscsi_transport bnx2i_iscsi_transport = { - .owner = THIS_MODULE, - .name = "bnx2i", - .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | - CAP_MULTI_R2T | CAP_DATADGST | - CAP_DATA_PATH_OFFLOAD, - .param_mask = ISCSI_MAX_RECV_DLENGTH | - ISCSI_MAX_XMIT_DLENGTH | - ISCSI_HDRDGST_EN | - ISCSI_DATADGST_EN | - ISCSI_INITIAL_R2T_EN | - ISCSI_MAX_R2T | - ISCSI_IMM_DATA_EN | - ISCSI_FIRST_BURST | - ISCSI_MAX_BURST | - ISCSI_PDU_INORDER_EN | - ISCSI_DATASEQ_INORDER_EN | - ISCSI_ERL | - ISCSI_CONN_PORT | - ISCSI_CONN_ADDRESS | - ISCSI_EXP_STATSN | - ISCSI_PERSISTENT_PORT | - ISCSI_PERSISTENT_ADDRESS | - ISCSI_TARGET_NAME | ISCSI_TPGT | - ISCSI_USERNAME | ISCSI_PASSWORD | - ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | - ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | - ISCSI_LU_RESET_TMO | - ISCSI_PING_TMO | ISCSI_RECV_TMO | - ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, - .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME, - .create_session = bnx2i_session_create, - .destroy_session = bnx2i_session_destroy, - .create_conn = bnx2i_conn_create, - .bind_conn = bnx2i_conn_bind, - .destroy_conn = bnx2i_conn_destroy, - .set_param = iscsi_set_param, - .get_conn_param = bnx2i_conn_get_param, - .get_session_param = iscsi_session_get_param, - .get_host_param = bnx2i_host_get_param, - .start_conn = bnx2i_conn_start, - .stop_conn = iscsi_conn_stop, - .send_pdu = iscsi_conn_send_pdu, - .xmit_task = bnx2i_task_xmit, - .get_stats = bnx2i_conn_get_stats, - /* TCP connect - disconnect - option-2 interface calls */ - .ep_connect = bnx2i_ep_connect, - .ep_poll = bnx2i_ep_poll, - .ep_disconnect = bnx2i_ep_disconnect, - .set_path = bnx2i_nl_set_path, - /* Error recovery timeout call */ - .session_recovery_timedout = iscsi_session_recovery_timedout, - .cleanup_task = bnx2i_cleanup_task, -}; diff --git a/trunk/drivers/scsi/bnx2i/bnx2i_sysfs.c b/trunk/drivers/scsi/bnx2i/bnx2i_sysfs.c deleted file mode 100644 index 96426b751eb2..000000000000 --- a/trunk/drivers/scsi/bnx2i/bnx2i_sysfs.c +++ /dev/null @@ -1,142 +0,0 @@ -/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver. - * - * Copyright (c) 2004 - 2009 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation. 
- * - * Written by: Anil Veerabhadrappa (anilgv@broadcom.com) - */ - -#include "bnx2i.h" - -/** - * bnx2i_dev_to_hba - maps dev pointer to adapter struct - * @dev: device pointer - * - * Map device to hba structure - */ -static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev) -{ - struct Scsi_Host *shost = class_to_shost(dev); - return iscsi_host_priv(shost); -} - - -/** - * bnx2i_show_sq_info - return(s currently configured send queue (SQ) size - * @dev: device pointer - * @buf: buffer to return current SQ size parameter - * - * Returns current SQ size parameter, this paramater determines the number - * outstanding iSCSI commands supported on a connection - */ -static ssize_t bnx2i_show_sq_info(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); - - return sprintf(buf, "0x%x\n", hba->max_sqes); -} - - -/** - * bnx2i_set_sq_info - update send queue (SQ) size parameter - * @dev: device pointer - * @buf: buffer to return current SQ size parameter - * @count: parameter buffer size - * - * Interface for user to change shared queue size allocated for each conn - * Must be within SQ limits and a power of 2. For the latter this is needed - * because of how libiscsi preallocates tasks. - */ -static ssize_t bnx2i_set_sq_info(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); - u32 val; - int max_sq_size; - - if (hba->ofld_conns_active) - goto skip_config; - - if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) - max_sq_size = BNX2I_5770X_SQ_WQES_MAX; - else - max_sq_size = BNX2I_570X_SQ_WQES_MAX; - - if (sscanf(buf, " 0x%x ", &val) > 0) { - if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) && - (is_power_of_2(val))) - hba->max_sqes = val; - } - - return count; - -skip_config: - printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n"); - return 0; -} - - -/** - * bnx2i_show_ccell_info - returns command cell (HQ) size - * @dev: device pointer - * @buf: buffer to return current SQ size parameter - * - * returns per-connection TCP history queue size parameter - */ -static ssize_t bnx2i_show_ccell_info(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); - - return sprintf(buf, "0x%x\n", hba->num_ccell); -} - - -/** - * bnx2i_get_link_state - set command cell (HQ) size - * @dev: device pointer - * @buf: buffer to return current SQ size parameter - * @count: parameter buffer size - * - * updates per-connection TCP history queue size parameter - */ -static ssize_t bnx2i_set_ccell_info(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - u32 val; - struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev); - - if (hba->ofld_conns_active) - goto skip_config; - - if (sscanf(buf, " 0x%x ", &val) > 0) { - if ((val >= BNX2I_CCELLS_MIN) && - (val <= BNX2I_CCELLS_MAX)) { - hba->num_ccell = val; - } - } - - return count; - -skip_config: - printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n"); - return 0; -} - - -static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR, - bnx2i_show_sq_info, bnx2i_set_sq_info); -static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR, - bnx2i_show_ccell_info, bnx2i_set_ccell_info); - -struct device_attribute *bnx2i_dev_attributes[] = { - &dev_attr_sq_size, - &dev_attr_num_ccell, - NULL -}; diff --git a/trunk/drivers/scsi/cxgb3i/cxgb3i.h b/trunk/drivers/scsi/cxgb3i/cxgb3i.h index e3133b58e594..59b0958d2d11 100644 --- 
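/*
 * Editor's sketch (not part of the patch): the deleted bnx2i_sysfs.c
 * above only accepts writes that are in range and a power of two,
 * because libiscsi preallocates tasks in power-of-two queues. A
 * condensed version of that show/store pair; demo_* names and limits
 * are hypothetical. One deliberate change: returning -EBUSY while
 * connections are active, where the original logs an error and
 * returns 0 (which makes userspace retry the write indefinitely).
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/log2.h>

static u32 demo_max_sqes = 128;
static int demo_conns_active;

static ssize_t demo_show_sq(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%x\n", demo_max_sqes);
}

static ssize_t demo_set_sq(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 val;

	if (demo_conns_active)
		return -EBUSY;

	if (sscanf(buf, " 0x%x ", &val) == 1 &&
	    val >= 32 && val <= 2048 && is_power_of_2(val))
		demo_max_sqes = val;

	return count;
}

static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR, demo_show_sq, demo_set_sq);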
a/trunk/drivers/scsi/cxgb3i/cxgb3i.h +++ b/trunk/drivers/scsi/cxgb3i/cxgb3i.h @@ -144,6 +144,7 @@ struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *); void cxgb3i_adapter_open(struct t3cdev *); void cxgb3i_adapter_close(struct t3cdev *); +struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *); struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *, struct net_device *); void cxgb3i_hba_host_remove(struct cxgb3i_hba *); diff --git a/trunk/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/trunk/drivers/scsi/cxgb3i/cxgb3i_iscsi.c index 74369a3f963b..9212400b9b13 100644 --- a/trunk/drivers/scsi/cxgb3i/cxgb3i_iscsi.c +++ b/trunk/drivers/scsi/cxgb3i/cxgb3i_iscsi.c @@ -13,7 +13,6 @@ #include #include -#include #include #include #include @@ -179,7 +178,7 @@ void cxgb3i_adapter_close(struct t3cdev *t3dev) * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device * @t3dev: t3cdev adapter */ -static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) +struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev) { struct cxgb3i_adapter *snic; int i; @@ -262,27 +261,20 @@ void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba) /** * cxgb3i_ep_connect - establish TCP connection to target portal - * @shost: scsi host to use * @dst_addr: target IP address * @non_blocking: blocking or non-blocking call * * Initiates a TCP/IP connection to the dst_addr */ -static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost, - struct sockaddr *dst_addr, +static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr, int non_blocking) { struct iscsi_endpoint *ep; struct cxgb3i_endpoint *cep; - struct cxgb3i_hba *hba = NULL; + struct cxgb3i_hba *hba; struct s3_conn *c3cn = NULL; int err = 0; - if (shost) - hba = iscsi_host_priv(shost); - - cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba); - c3cn = cxgb3i_c3cn_create(); if (!c3cn) { cxgb3i_log_info("ep connect OOM.\n"); @@ -290,27 +282,17 @@ static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost, goto release_conn; } - err = cxgb3i_c3cn_connect(hba ? hba->ndev : NULL, c3cn, - (struct sockaddr_in *)dst_addr); + err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr); if (err < 0) { cxgb3i_log_info("ep connect failed.\n"); goto release_conn; } - hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev); if (!hba) { err = -ENOSPC; cxgb3i_log_info("NOT going through cxgbi device.\n"); goto release_conn; } - - if (shost && hba != iscsi_host_priv(shost)) { - err = -ENOSPC; - cxgb3i_log_info("Could not connect through request host%u\n", - shost->host_no); - goto release_conn; - } - if (c3cn_is_closing(c3cn)) { err = -ENOSPC; cxgb3i_log_info("ep connect unable to connect.\n"); diff --git a/trunk/drivers/scsi/cxgb3i/cxgb3i_offload.c b/trunk/drivers/scsi/cxgb3i/cxgb3i_offload.c index c1d5be4adf9c..e11c9c180f39 100644 --- a/trunk/drivers/scsi/cxgb3i/cxgb3i_offload.c +++ b/trunk/drivers/scsi/cxgb3i/cxgb3i_offload.c @@ -1479,13 +1479,12 @@ static struct net_device *cxgb3_egress_dev(struct net_device *root_dev, return NULL; } -static struct rtable *find_route(struct net_device *dev, - __be32 saddr, __be32 daddr, +static struct rtable *find_route(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport) { struct rtable *rt; struct flowi fl = { - .oif = dev ? 
dev->ifindex : 0, + .oif = 0, .nl_u = { .ip4_u = { .daddr = daddr, @@ -1574,40 +1573,36 @@ static int initiate_act_open(struct s3_conn *c3cn, struct net_device *dev) * * return 0 if active open request is sent, < 0 otherwise. */ -int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn, - struct sockaddr_in *usin) +int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin) { struct rtable *rt; + struct net_device *dev; struct cxgb3i_sdev_data *cdata; struct t3cdev *cdev; __be32 sipv4; int err; - c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev); - if (usin->sin_family != AF_INET) return -EAFNOSUPPORT; c3cn->daddr.sin_port = usin->sin_port; c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; - rt = find_route(dev, c3cn->saddr.sin_addr.s_addr, + rt = find_route(c3cn->saddr.sin_addr.s_addr, c3cn->daddr.sin_addr.s_addr, c3cn->saddr.sin_port, c3cn->daddr.sin_port); if (rt == NULL) { - c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n", + c3cn_conn_debug("NO route to 0x%x, port %u.\n", c3cn->daddr.sin_addr.s_addr, - ntohs(c3cn->daddr.sin_port), - dev ? dev->name : "any"); + ntohs(c3cn->daddr.sin_port)); return -ENETUNREACH; } if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { - c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n", + c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n", c3cn->daddr.sin_addr.s_addr, - ntohs(c3cn->daddr.sin_port), - dev ? dev->name : "any"); + ntohs(c3cn->daddr.sin_port)); ip_rt_put(rt); return -ENETUNREACH; } diff --git a/trunk/drivers/scsi/cxgb3i/cxgb3i_offload.h b/trunk/drivers/scsi/cxgb3i/cxgb3i_offload.h index 6a1d86b1fafe..ebfca960c0a9 100644 --- a/trunk/drivers/scsi/cxgb3i/cxgb3i_offload.h +++ b/trunk/drivers/scsi/cxgb3i/cxgb3i_offload.h @@ -169,8 +169,7 @@ void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *); void cxgb3i_sdev_remove(struct t3cdev *); struct s3_conn *cxgb3i_c3cn_create(void); -int cxgb3i_c3cn_connect(struct net_device *, struct s3_conn *, - struct sockaddr_in *); +int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *); void cxgb3i_c3cn_rx_credits(struct s3_conn *, int); int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *); void cxgb3i_c3cn_release(struct s3_conn *); diff --git a/trunk/drivers/scsi/device_handler/scsi_dh_rdac.c b/trunk/drivers/scsi/device_handler/scsi_dh_rdac.c index fd0544f7da81..43b8c51e98d0 100644 --- a/trunk/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/trunk/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -561,12 +561,6 @@ static int rdac_check_sense(struct scsi_device *sdev, struct rdac_dh_data *h = get_rdac_data(sdev); switch (sense_hdr->sense_key) { case NOT_READY: - if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01) - /* LUN Not Ready - Logical Unit Not Ready and is in - * the process of becoming ready - * Just retry. - */ - return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81) /* LUN Not Ready - Storage firmware incompatible * Manual code synchonisation required. 
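/*
 * Editor's sketch (not part of the patch): with the egress-device
 * argument dropped, the cxgb3i find_route() above leaves .oif at 0 and
 * lets the stack pick the route. A stand-alone version against the
 * 2.6.30-era flowi/ip_route_output_flow() interface; demo_find_route
 * is a hypothetical name.
 */
#include <linux/in.h>
#include <net/flow.h>
#include <net/route.h>
#include <net/net_namespace.h>

static struct rtable *demo_find_route(__be32 saddr, __be32 daddr,
				      __be16 sport, __be16 dport)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,	/* no device pinned: any egress interface */
		.nl_u = {
			.ip4_u = {
				.daddr = daddr,
				.saddr = saddr,
			},
		},
		.proto = IPPROTO_TCP,
		.uli_u = {
			.ports = {
				.sport = sport,
				.dport = dport,
			},
		},
	};

	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
		return NULL;	/* e.g. no route: caller returns -ENETUNREACH */
	return rt;
}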
diff --git a/trunk/drivers/scsi/fcoe/fcoe.c b/trunk/drivers/scsi/fcoe/fcoe.c index e606b4829d44..03e1926f40b5 100644 --- a/trunk/drivers/scsi/fcoe/fcoe.c +++ b/trunk/drivers/scsi/fcoe/fcoe.c @@ -54,6 +54,7 @@ MODULE_LICENSE("GPL v2"); /* fcoe host list */ LIST_HEAD(fcoe_hostlist); DEFINE_RWLOCK(fcoe_hostlist_lock); +DEFINE_TIMER(fcoe_timer, NULL, 0, 0); DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu); /* Function Prototypes */ @@ -70,7 +71,7 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *); static int fcoe_hostlist_add(const struct fc_lport *); static int fcoe_hostlist_remove(const struct fc_lport *); -static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *); +static int fcoe_check_wait_queue(struct fc_lport *); static int fcoe_device_notification(struct notifier_block *, ulong, void *); static void fcoe_dev_setup(void); static void fcoe_dev_cleanup(void); @@ -145,7 +146,6 @@ static int fcoe_lport_config(struct fc_lport *lp) lp->link_up = 0; lp->qfull = 0; lp->max_retry_count = 3; - lp->max_rport_retry_count = 3; lp->e_d_tov = 2 * 1000; /* FC-FS default */ lp->r_a_tov = 2 * 2 * 1000; lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | @@ -166,18 +166,6 @@ static int fcoe_lport_config(struct fc_lport *lp) return 0; } -/** - * fcoe_queue_timer() - fcoe queue timer - * @lp: the fc_lport pointer - * - * Calls fcoe_check_wait_queue on timeout - * - */ -static void fcoe_queue_timer(ulong lp) -{ - fcoe_check_wait_queue((struct fc_lport *)lp, NULL); -} - /** * fcoe_netdev_config() - Set up netdev for SW FCoE * @lp : ptr to the fc_lport @@ -248,7 +236,6 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev) } skb_queue_head_init(&fc->fcoe_pending_queue); fc->fcoe_pending_queue_active = 0; - setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp); /* setup Source Mac Address */ memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr, @@ -399,9 +386,6 @@ static int fcoe_if_destroy(struct net_device *netdev) /* Free existing skbs */ fcoe_clean_pending_queue(lp); - /* Stop the timer */ - del_timer_sync(&fc->timer); - /* Free memory used by statistical counters */ fc_lport_free_stats(lp); @@ -1004,7 +988,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp) */ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) { - int wlen; + int wlen, rc = 0; u32 crc; struct ethhdr *eh; struct fcoe_crc_eof *cp; @@ -1037,7 +1021,8 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) sof = fr_sof(fp); eof = fr_eof(fp); - elen = sizeof(struct ethhdr); + elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ? 
+ sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr); hlen = sizeof(struct fcoe_hdr); tlen = sizeof(struct fcoe_crc_eof); wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; @@ -1122,9 +1107,18 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp) /* send down to lld */ fr_dev(fp) = lp; if (fc->fcoe_pending_queue.qlen) - fcoe_check_wait_queue(lp, skb); - else if (fcoe_start_io(skb)) - fcoe_check_wait_queue(lp, skb); + rc = fcoe_check_wait_queue(lp); + + if (rc == 0) + rc = fcoe_start_io(skb); + + if (rc) { + spin_lock_bh(&fc->fcoe_pending_queue.lock); + __skb_queue_tail(&fc->fcoe_pending_queue, skb); + spin_unlock_bh(&fc->fcoe_pending_queue.lock); + if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) + lp->qfull = 1; + } return 0; } @@ -1273,6 +1267,32 @@ int fcoe_percpu_receive_thread(void *arg) return 0; } +/** + * fcoe_watchdog() - fcoe timer callback + * @vp: + * + * This checks the pending queue length for fcoe and set lport qfull + * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the + * fcoe_hostlist. + * + * Returns: 0 for success + */ +void fcoe_watchdog(ulong vp) +{ + struct fcoe_softc *fc; + + read_lock(&fcoe_hostlist_lock); + list_for_each_entry(fc, &fcoe_hostlist, list) { + if (fc->ctlr.lp) + fcoe_check_wait_queue(fc->ctlr.lp); + } + read_unlock(&fcoe_hostlist_lock); + + fcoe_timer.expires = jiffies + (1 * HZ); + add_timer(&fcoe_timer); +} + + /** * fcoe_check_wait_queue() - attempt to clear the transmit backlog * @lp: the fc_lport @@ -1285,17 +1305,16 @@ int fcoe_percpu_receive_thread(void *arg) * The wait_queue is used when the skb transmit fails. skb will go * in the wait_queue which will be emptied by the timer function or * by the next skb transmit. + * + * Returns: 0 for success */ -static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb) +static int fcoe_check_wait_queue(struct fc_lport *lp) { struct fcoe_softc *fc = lport_priv(lp); - int rc; + struct sk_buff *skb; + int rc = -1; spin_lock_bh(&fc->fcoe_pending_queue.lock); - - if (skb) - __skb_queue_tail(&fc->fcoe_pending_queue, skb); - if (fc->fcoe_pending_queue_active) goto out; fc->fcoe_pending_queue_active = 1; @@ -1321,26 +1340,23 @@ static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb) if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH) lp->qfull = 0; - if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer)) - mod_timer(&fc->timer, jiffies + 2); fc->fcoe_pending_queue_active = 0; + rc = fc->fcoe_pending_queue.qlen; out: - if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH) - lp->qfull = 1; spin_unlock_bh(&fc->fcoe_pending_queue.lock); - return; + return rc; } /** * fcoe_dev_setup() - setup link change notification interface */ -static void fcoe_dev_setup(void) +static void fcoe_dev_setup() { register_netdevice_notifier(&fcoe_notifier); } /** - * fcoe_dev_cleanup() - cleanup link change notification interface + * fcoe_dev_setup() - cleanup link change notification interface */ static void fcoe_dev_cleanup(void) { @@ -1799,6 +1815,10 @@ static int __init fcoe_init(void) /* Setup link change notification */ fcoe_dev_setup(); + setup_timer(&fcoe_timer, fcoe_watchdog, 0); + + mod_timer(&fcoe_timer, jiffies + (10 * HZ)); + fcoe_if_init(); return 0; @@ -1824,6 +1844,9 @@ static void __exit fcoe_exit(void) fcoe_dev_cleanup(); + /* Stop the timer */ + del_timer_sync(&fcoe_timer); + /* releases the associated fcoe hosts */ list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list) fcoe_if_destroy(fc->real_dev); diff --git 
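/*
 * Editor's sketch (not part of the patch): the fcoe.c hunks above
 * reintroduce a single global, self-rearming fcoe_timer in place of
 * the per-lport queue timer. The skeleton of that pattern, assuming
 * the 2.6.30-era four-argument DEFINE_TIMER(); demo_* names are
 * hypothetical.
 */
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static void demo_watchdog(unsigned long unused);
static DEFINE_TIMER(demo_timer, demo_watchdog, 0, 0);

static void demo_watchdog(unsigned long unused)
{
	/* ... walk the host list and drain per-lport backlogs here ... */

	/* Kernel timers are one-shot: re-arm for the next 1s tick. */
	demo_timer.expires = jiffies + 1 * HZ;
	add_timer(&demo_timer);
}

static int __init demo_init(void)
{
	mod_timer(&demo_timer, jiffies + 10 * HZ);	/* first tick */
	return 0;
}

static void __exit demo_exit(void)
{
	/* Must be _sync so a running callback cannot re-arm after exit. */
	del_timer_sync(&demo_timer);
}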
a/trunk/drivers/scsi/fcoe/fcoe.h b/trunk/drivers/scsi/fcoe/fcoe.h index a1eb8c1988b0..917aae886897 100644 --- a/trunk/drivers/scsi/fcoe/fcoe.h +++ b/trunk/drivers/scsi/fcoe/fcoe.h @@ -61,7 +61,6 @@ struct fcoe_softc { struct packet_type fip_packet_type; struct sk_buff_head fcoe_pending_queue; u8 fcoe_pending_queue_active; - struct timer_list timer; /* queue timer */ struct fcoe_ctlr ctlr; }; diff --git a/trunk/drivers/scsi/fcoe/libfcoe.c b/trunk/drivers/scsi/fcoe/libfcoe.c index 929411880e4b..62ba0f39c6bd 100644 --- a/trunk/drivers/scsi/fcoe/libfcoe.c +++ b/trunk/drivers/scsi/fcoe/libfcoe.c @@ -213,7 +213,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf) sol->desc.size.fd_size = htons(fcoe_size); skb_put(skb, sizeof(*sol)); - skb->protocol = htons(ETH_P_FIP); + skb->protocol = htons(ETH_P_802_3); skb_reset_mac_header(skb); skb_reset_network_header(skb); fip->send(fip, skb); @@ -365,7 +365,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa) } skb_put(skb, len); - skb->protocol = htons(ETH_P_FIP); + skb->protocol = htons(ETH_P_802_3); skb_reset_mac_header(skb); skb_reset_network_header(skb); fip->send(fip, skb); @@ -424,7 +424,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, if (dtype != ELS_FLOGI) memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN); - skb->protocol = htons(ETH_P_FIP); + skb->protocol = htons(ETH_P_802_3); skb_reset_mac_header(skb); skb_reset_network_header(skb); return 0; @@ -447,10 +447,14 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb) u16 old_xid; u8 op; + if (fip->state == FIP_ST_NON_FIP) + return 0; + fh = (struct fc_frame_header *)skb->data; op = *(u8 *)(fh + 1); - if (op == ELS_FLOGI) { + switch (op) { + case ELS_FLOGI: old_xid = fip->flogi_oxid; fip->flogi_oxid = ntohs(fh->fh_ox_id); if (fip->state == FIP_ST_AUTO) { @@ -462,15 +466,6 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb) fip->map_dest = 1; return 0; } - if (fip->state == FIP_ST_NON_FIP) - fip->map_dest = 1; - } - - if (fip->state == FIP_ST_NON_FIP) - return 0; - - switch (op) { - case ELS_FLOGI: op = FIP_DT_FLOGI; break; case ELS_FDISC: diff --git a/trunk/drivers/scsi/fnic/fnic_main.c b/trunk/drivers/scsi/fnic/fnic_main.c index a84072865fc2..32ef6b87d895 100644 --- a/trunk/drivers/scsi/fnic/fnic_main.c +++ b/trunk/drivers/scsi/fnic/fnic_main.c @@ -680,7 +680,6 @@ static int __devinit fnic_probe(struct pci_dev *pdev, } lp->max_retry_count = fnic->config.flogi_retries; - lp->max_rport_retry_count = fnic->config.plogi_retries; lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | FCP_SPPF_CONF_COMPL); if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) diff --git a/trunk/drivers/scsi/gdth_proc.c b/trunk/drivers/scsi/gdth_proc.c index 1258da34fbc2..59349a316e13 100644 --- a/trunk/drivers/scsi/gdth_proc.c +++ b/trunk/drivers/scsi/gdth_proc.c @@ -152,7 +152,6 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, struct Scsi_Host *host, gdth_ha_str *ha) { int size = 0,len = 0; - int hlen; off_t begin = 0,pos = 0; int id, i, j, k, sec, flag; int no_mdrv = 0, drv_no, is_mirr; @@ -193,11 +192,11 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length, if (reserve_list[0] == 0xff) strcpy(hrec, "--"); else { - hlen = sprintf(hrec, "%d", reserve_list[0]); + sprintf(hrec, "%d", reserve_list[0]); for (i = 1; i < MAX_RES_ARGS; i++) { if (reserve_list[i] == 0xff) break; - hlen += snprintf(hrec + hlen , 161 - hlen, ",%d", reserve_list[i]); + 
sprintf(hrec,"%s,%d", hrec, reserve_list[i]); } } size = sprintf(buffer+len, diff --git a/trunk/drivers/scsi/ibmvscsi/ibmvfc.c b/trunk/drivers/scsi/ibmvscsi/ibmvfc.c index b4b805e8d7db..ea4abee7a2a9 100644 --- a/trunk/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/trunk/drivers/scsi/ibmvscsi/ibmvfc.c @@ -110,7 +110,7 @@ static const struct { { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" }, - { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" }, + { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" }, { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" }, { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" }, @@ -143,7 +143,6 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *); static void ibmvfc_tgt_send_prli(struct ibmvfc_target *); static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *); static void ibmvfc_tgt_query_target(struct ibmvfc_target *); -static void ibmvfc_npiv_logout(struct ibmvfc_host *); static const char *unknown_error = "unknown error"; @@ -276,7 +275,7 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd) int fc_rsp_len = rsp->fcp_rsp_len; if ((rsp->flags & FCP_RSP_LEN_VALID) && - ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || + ((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) || rsp->data.info.rsp_code)) return DID_ERROR << 16; @@ -432,8 +431,6 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt, case IBMVFC_TGT_ACTION_DEL_RPORT: break; default: - if (action == IBMVFC_TGT_ACTION_DEL_RPORT) - tgt->add_rport = 0; tgt->action = action; break; } @@ -478,10 +475,6 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) vhost->action = action; break; - case IBMVFC_HOST_ACTION_LOGO_WAIT: - if (vhost->action == IBMVFC_HOST_ACTION_LOGO) - vhost->action = action; - break; case IBMVFC_HOST_ACTION_INIT_WAIT: if (vhost->action == IBMVFC_HOST_ACTION_INIT) vhost->action = action; @@ -490,7 +483,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, switch (vhost->action) { case IBMVFC_HOST_ACTION_INIT_WAIT: case IBMVFC_HOST_ACTION_NONE: - case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: + case IBMVFC_HOST_ACTION_TGT_ADD: vhost->action = action; break; default: @@ -501,11 +494,11 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost, if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS) vhost->action = action; break; - case IBMVFC_HOST_ACTION_LOGO: case IBMVFC_HOST_ACTION_INIT: case IBMVFC_HOST_ACTION_TGT_DEL: case IBMVFC_HOST_ACTION_QUERY_TGTS: case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: + case IBMVFC_HOST_ACTION_TGT_ADD: case IBMVFC_HOST_ACTION_NONE: default: vhost->action = action; @@ -583,7 +576,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin) } list_for_each_entry(tgt, &vhost->targets, queue) - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + tgt->need_login = 1; scsi_block_requests(vhost->host); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); vhost->job_step = ibmvfc_npiv_login; @@ -653,7 +646,6 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost) } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); vhost->state = IBMVFC_NO_CRQ; - vhost->logged_in = 0; dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, 
DMA_BIDIRECTIONAL); free_page((unsigned long)crq->msgs); } @@ -700,7 +692,6 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) } while (rc == H_BUSY || H_IS_LONG_BUSY(rc)); vhost->state = IBMVFC_NO_CRQ; - vhost->logged_in = 0; ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); /* Clean out the queue */ @@ -816,10 +807,10 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code) } /** - * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ + * __ibmvfc_reset_host - Reset the connection to the server (no locking) * @vhost: struct ibmvfc host to reset **/ -static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost) +static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) { int rc; @@ -834,25 +825,9 @@ static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost) ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); } -/** - * __ibmvfc_reset_host - Reset the connection to the server (no locking) - * @vhost: struct ibmvfc host to reset - **/ -static void __ibmvfc_reset_host(struct ibmvfc_host *vhost) -{ - if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT && - !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { - scsi_block_requests(vhost->host); - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO); - vhost->job_step = ibmvfc_npiv_logout; - wake_up(&vhost->work_wait_q); - } else - ibmvfc_hard_reset_host(vhost); -} - /** * ibmvfc_reset_host - Reset the connection to the server - * @vhost: ibmvfc host struct + * @vhost: struct ibmvfc host to reset **/ static void ibmvfc_reset_host(struct ibmvfc_host *vhost) { @@ -867,13 +842,9 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost) * ibmvfc_retry_host_init - Retry host initialization if allowed * @vhost: ibmvfc host struct * - * Returns: 1 if init will be retried / 0 if not - * **/ -static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost) +static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost) { - int retry = 0; - if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) { vhost->delay_init = 1; if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) { @@ -882,14 +853,11 @@ static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost) ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE); } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES) __ibmvfc_reset_host(vhost); - else { + else ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); - retry = 1; - } } wake_up(&vhost->work_wait_q); - return retry; } /** @@ -1169,9 +1137,8 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost) login_info->partition_num = vhost->partition_number; login_info->vfc_frame_version = 1; login_info->fcp_version = 3; - login_info->flags = IBMVFC_FLUSH_ON_HALT; if (vhost->client_migrated) - login_info->flags |= IBMVFC_CLIENT_MIGRATED; + login_info->flags = IBMVFC_CLIENT_MIGRATED; login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ; login_info->capabilities = IBMVFC_CAN_MIGRATE; @@ -1484,27 +1451,6 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt) rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status); } -/** - * ibmvfc_relogin - Log back into the specified device - * @sdev: scsi device struct - * - **/ -static void ibmvfc_relogin(struct scsi_device *sdev) -{ - struct ibmvfc_host *vhost = shost_priv(sdev->host); - struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); - struct ibmvfc_target *tgt; - - list_for_each_entry(tgt, &vhost->targets, queue) { - if (rport == tgt->rport) { - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); - 
break; - } - } - - ibmvfc_reinit_host(vhost); -} - /** * ibmvfc_scsi_done - Handle responses from commands * @evt: ibmvfc event to be handled @@ -1537,7 +1483,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt) if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8) memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len); if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED)) - ibmvfc_relogin(cmnd->device); + ibmvfc_reinit_host(evt->vhost); if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER))) cmnd->result = (DID_ERROR << 16); @@ -2202,31 +2148,13 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, struct ibmvfc_host *vhost) { const char *desc = ibmvfc_get_ae_desc(crq->event); - struct ibmvfc_target *tgt; ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx," " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name); switch (crq->event) { - case IBMVFC_AE_RESUME: - switch (crq->link_state) { - case IBMVFC_AE_LS_LINK_DOWN: - ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); - break; - case IBMVFC_AE_LS_LINK_DEAD: - ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); - break; - case IBMVFC_AE_LS_LINK_UP: - case IBMVFC_AE_LS_LINK_BOUNCED: - default: - vhost->events_to_log |= IBMVFC_AE_LINKUP; - vhost->delay_init = 1; - __ibmvfc_reset_host(vhost); - break; - }; - - break; case IBMVFC_AE_LINK_UP: + case IBMVFC_AE_RESUME: vhost->events_to_log |= IBMVFC_AE_LINKUP; vhost->delay_init = 1; __ibmvfc_reset_host(vhost); @@ -2240,23 +2168,9 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq, case IBMVFC_AE_SCN_NPORT: case IBMVFC_AE_SCN_GROUP: vhost->events_to_log |= IBMVFC_AE_RSCN; - ibmvfc_reinit_host(vhost); - break; case IBMVFC_AE_ELS_LOGO: case IBMVFC_AE_ELS_PRLO: case IBMVFC_AE_ELS_PLOGI: - list_for_each_entry(tgt, &vhost->targets, queue) { - if (!crq->scsi_id && !crq->wwpn && !crq->node_name) - break; - if (crq->scsi_id && tgt->scsi_id != crq->scsi_id) - continue; - if (crq->wwpn && tgt->ids.port_name != crq->wwpn) - continue; - if (crq->node_name && tgt->ids.node_name != crq->node_name) - continue; - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); - } - ibmvfc_reinit_host(vhost); break; case IBMVFC_AE_LINK_DOWN: @@ -2308,7 +2222,6 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost) return; case IBMVFC_CRQ_XPORT_EVENT: vhost->state = IBMVFC_NO_CRQ; - vhost->logged_in = 0; ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); if (crq->format == IBMVFC_PARTITION_MIGRATED) { /* We need to re-setup the interpartition connection */ @@ -2386,7 +2299,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time) done = 1; } - if (vhost->scan_complete) + if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE) done = 1; spin_unlock_irqrestore(shost->host_lock, flags); return done; @@ -2521,6 +2434,14 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev, vhost->login_buf->resp.partition_name); } +static struct device_attribute ibmvfc_host_partition_name = { + .attr = { + .name = "partition_name", + .mode = S_IRUGO, + }, + .show = ibmvfc_show_host_partition_name, +}; + static ssize_t ibmvfc_show_host_device_name(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2531,6 +2452,14 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev, vhost->login_buf->resp.device_name); } +static struct device_attribute ibmvfc_host_device_name = { + .attr = { + .name = 
"device_name", + .mode = S_IRUGO, + }, + .show = ibmvfc_show_host_device_name, +}; + static ssize_t ibmvfc_show_host_loc_code(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2541,6 +2470,14 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev, vhost->login_buf->resp.port_loc_code); } +static struct device_attribute ibmvfc_host_loc_code = { + .attr = { + .name = "port_loc_code", + .mode = S_IRUGO, + }, + .show = ibmvfc_show_host_loc_code, +}; + static ssize_t ibmvfc_show_host_drc_name(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2551,6 +2488,14 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev, vhost->login_buf->resp.drc_name); } +static struct device_attribute ibmvfc_host_drc_name = { + .attr = { + .name = "drc_name", + .mode = S_IRUGO, + }, + .show = ibmvfc_show_host_drc_name, +}; + static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2559,13 +2504,13 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev, return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version); } -static ssize_t ibmvfc_show_host_capabilities(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct ibmvfc_host *vhost = shost_priv(shost); - return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities); -} +static struct device_attribute ibmvfc_host_npiv_version = { + .attr = { + .name = "npiv_version", + .mode = S_IRUGO, + }, + .show = ibmvfc_show_host_npiv_version, +}; /** * ibmvfc_show_log_level - Show the adapter's error logging level @@ -2611,14 +2556,14 @@ static ssize_t ibmvfc_store_log_level(struct device *dev, return strlen(buf); } -static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL); -static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL); -static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL); -static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL); -static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL); -static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL); -static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR, - ibmvfc_show_log_level, ibmvfc_store_log_level); +static struct device_attribute ibmvfc_log_level_attr = { + .attr = { + .name = "log_level", + .mode = S_IRUGO | S_IWUSR, + }, + .show = ibmvfc_show_log_level, + .store = ibmvfc_store_log_level +}; #ifdef CONFIG_SCSI_IBMVFC_TRACE /** @@ -2667,13 +2612,12 @@ static struct bin_attribute ibmvfc_trace_attr = { #endif static struct device_attribute *ibmvfc_attrs[] = { - &dev_attr_partition_name, - &dev_attr_device_name, - &dev_attr_port_loc_code, - &dev_attr_drc_name, - &dev_attr_npiv_version, - &dev_attr_capabilities, - &dev_attr_log_level, + &ibmvfc_host_partition_name, + &ibmvfc_host_device_name, + &ibmvfc_host_loc_code, + &ibmvfc_host_drc_name, + &ibmvfc_host_npiv_version, + &ibmvfc_log_level_attr, NULL }; @@ -2830,19 +2774,15 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt, * @tgt: ibmvfc target struct * @job_step: initialization job step * - * Returns: 1 if step will be retried / 0 if not - * **/ -static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, +static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt, void (*job_step) (struct ibmvfc_target *)) { if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) { ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 
wake_up(&tgt->vhost->work_wait_q); - return 0; } else ibmvfc_init_tgt(tgt, job_step); - return 1; } /* Defined in FC-LS */ @@ -2891,7 +2831,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli; struct ibmvfc_prli_svc_parms *parms = &rsp->parms; u32 status = rsp->common.status; - int index, level = IBMVFC_DEFAULT_LOG_LEVEL; + int index; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); @@ -2910,7 +2850,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET; if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC) tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR; - tgt->add_rport = 1; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT); } else ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); } else if (prli_rsp[index].retry) @@ -2927,14 +2867,13 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt) break; case IBMVFC_MAD_FAILED: default: + tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), + rsp->status, rsp->error, status); if (ibmvfc_retry_cmd(rsp->status, rsp->error)) - level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); + ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli); else ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); - - tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), - rsp->status, rsp->error, status); break; }; @@ -2993,7 +2932,6 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi; u32 status = rsp->common.status; - int level = IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); @@ -3022,15 +2960,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt) break; case IBMVFC_MAD_FAILED: default: - if (ibmvfc_retry_cmd(rsp->status, rsp->error)) - level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); - else - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); - - tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", + tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status); + + if (ibmvfc_retry_cmd(rsp->status, rsp->error)) + ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi); + else + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); break; }; @@ -3191,13 +3129,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt) case IBMVFC_MAD_SUCCESS: tgt_dbg(tgt, "ADISC succeeded\n"); if (ibmvfc_adisc_needs_plogi(mad, tgt)) - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + tgt->need_login = 1; break; case IBMVFC_MAD_DRIVER_FAILED: break; case IBMVFC_MAD_FAILED: default: - ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); + tgt->need_login = 1; fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16; fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8; tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", @@ -3384,7 +3322,6 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt; u32 status = rsp->common.status; - int level = 
IBMVFC_DEFAULT_LOG_LEVEL; vhost->discovery_threads--; ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); @@ -3404,19 +3341,19 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt) break; case IBMVFC_MAD_FAILED: default: + tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, + ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, + ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status); + if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED && rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ && rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); else if (ibmvfc_retry_cmd(rsp->status, rsp->error)) - level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); + ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target); else ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); - - tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error, - ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type, - ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status); break; }; @@ -3483,7 +3420,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id) } spin_unlock_irqrestore(vhost->host->host_lock, flags); - tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO); + tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL); if (!tgt) { dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n", scsi_id); @@ -3535,7 +3472,6 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt) struct ibmvfc_host *vhost = evt->vhost; struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets; u32 mad_status = rsp->common.status; - int level = IBMVFC_DEFAULT_LOG_LEVEL; switch (mad_status) { case IBMVFC_MAD_SUCCESS: @@ -3544,9 +3480,9 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS); break; case IBMVFC_MAD_FAILED: - level += ibmvfc_retry_host_init(vhost); - ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); + dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); + ibmvfc_retry_host_init(vhost); break; case IBMVFC_MAD_DRIVER_FAILED: break; @@ -3598,19 +3534,18 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) u32 mad_status = evt->xfer_iu->npiv_login.common.status; struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp; unsigned int npiv_max_sectors; - int level = IBMVFC_DEFAULT_LOG_LEVEL; switch (mad_status) { case IBMVFC_MAD_SUCCESS: ibmvfc_free_event(evt); break; case IBMVFC_MAD_FAILED: + dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n", + ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); if (ibmvfc_retry_cmd(rsp->status, rsp->error)) - level += ibmvfc_retry_host_init(vhost); + ibmvfc_retry_host_init(vhost); else ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); - ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n", - ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error); ibmvfc_free_event(evt); return; case IBMVFC_MAD_CRQ_ERROR: @@ -3643,7 +3578,6 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) return; } - vhost->logged_in = 1; npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), 
IBMVFC_MAX_SECTORS); dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n", rsp->partition_name, rsp->device_name, rsp->port_loc_code, @@ -3701,65 +3635,6 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost) ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); }; -/** - * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout - * @vhost: ibmvfc host struct - * - **/ -static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt) -{ - struct ibmvfc_host *vhost = evt->vhost; - u32 mad_status = evt->xfer_iu->npiv_logout.common.status; - - ibmvfc_free_event(evt); - - switch (mad_status) { - case IBMVFC_MAD_SUCCESS: - if (list_empty(&vhost->sent) && - vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) { - ibmvfc_init_host(vhost, 0); - return; - } - break; - case IBMVFC_MAD_FAILED: - case IBMVFC_MAD_NOT_SUPPORTED: - case IBMVFC_MAD_CRQ_ERROR: - case IBMVFC_MAD_DRIVER_FAILED: - default: - ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status); - break; - } - - ibmvfc_hard_reset_host(vhost); -} - -/** - * ibmvfc_npiv_logout - Issue an NPIV Logout - * @vhost: ibmvfc host struct - * - **/ -static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost) -{ - struct ibmvfc_npiv_logout_mad *mad; - struct ibmvfc_event *evt; - - evt = ibmvfc_get_event(vhost); - ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT); - - mad = &evt->iu.npiv_logout; - memset(mad, 0, sizeof(*mad)); - mad->common.version = 1; - mad->common.opcode = IBMVFC_NPIV_LOGOUT; - mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad); - - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT); - - if (!ibmvfc_send_event(evt, vhost, default_timeout)) - ibmvfc_dbg(vhost, "Sent NPIV logout\n"); - else - ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); -} - /** * ibmvfc_dev_init_to_do - Is there target initialization work to do? 
* @vhost: ibmvfc host struct @@ -3796,7 +3671,6 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost) switch (vhost->action) { case IBMVFC_HOST_ACTION_NONE: case IBMVFC_HOST_ACTION_INIT_WAIT: - case IBMVFC_HOST_ACTION_LOGO_WAIT: return 0; case IBMVFC_HOST_ACTION_TGT_INIT: case IBMVFC_HOST_ACTION_QUERY_TGTS: @@ -3809,9 +3683,9 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost) if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT) return 0; return 1; - case IBMVFC_HOST_ACTION_LOGO: case IBMVFC_HOST_ACTION_INIT: case IBMVFC_HOST_ACTION_ALLOC_TGTS: + case IBMVFC_HOST_ACTION_TGT_ADD: case IBMVFC_HOST_ACTION_TGT_DEL: case IBMVFC_HOST_ACTION_TGT_DEL_FAILED: case IBMVFC_HOST_ACTION_QUERY: @@ -3866,26 +3740,25 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events) static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) { struct ibmvfc_host *vhost = tgt->vhost; - struct fc_rport *rport; + struct fc_rport *rport = tgt->rport; unsigned long flags; - tgt_dbg(tgt, "Adding rport\n"); - rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); - spin_lock_irqsave(vhost->host->host_lock, flags); - - if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) { - tgt_dbg(tgt, "Deleting rport\n"); - list_del(&tgt->queue); + if (rport) { + tgt_dbg(tgt, "Setting rport roles\n"); + fc_remote_port_rolechg(rport, tgt->ids.roles); + spin_lock_irqsave(vhost->host->host_lock, flags); + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); spin_unlock_irqrestore(vhost->host->host_lock, flags); - fc_remote_port_delete(rport); - del_timer_sync(&tgt->timer); - kref_put(&tgt->kref, ibmvfc_release_tgt); return; } + tgt_dbg(tgt, "Adding rport\n"); + rport = fc_remote_port_add(vhost->host, 0, &tgt->ids); + spin_lock_irqsave(vhost->host->host_lock, flags); + tgt->rport = rport; + ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE); if (rport) { tgt_dbg(tgt, "rport add succeeded\n"); - tgt->rport = rport; rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff; rport->supported_classes = 0; tgt->target_id = rport->scsi_target_id; @@ -3916,12 +3789,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) vhost->events_to_log = 0; switch (vhost->action) { case IBMVFC_HOST_ACTION_NONE: - case IBMVFC_HOST_ACTION_LOGO_WAIT: case IBMVFC_HOST_ACTION_INIT_WAIT: break; - case IBMVFC_HOST_ACTION_LOGO: - vhost->job_step(vhost); - break; case IBMVFC_HOST_ACTION_INIT: BUG_ON(vhost->state != IBMVFC_INITIALIZING); if (vhost->delay_init) { @@ -3967,21 +3836,11 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) if (vhost->state == IBMVFC_INITIALIZING) { if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) { - if (vhost->reinit) { - vhost->reinit = 0; - scsi_block_requests(vhost->host); - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); - spin_unlock_irqrestore(vhost->host->host_lock, flags); - } else { - ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); - ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); - wake_up(&vhost->init_wait_q); - schedule_work(&vhost->rport_add_work_q); - vhost->init_retries = 0; - spin_unlock_irqrestore(vhost->host->host_lock, flags); - scsi_unblock_requests(vhost->host); - } - + ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD); + vhost->init_retries = 0; + spin_unlock_irqrestore(vhost->host->host_lock, flags); + scsi_unblock_requests(vhost->host); return; } else { ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); @@ -4012,6 +3871,24 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost) if 
(!ibmvfc_dev_init_to_do(vhost)) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED); break; + case IBMVFC_HOST_ACTION_TGT_ADD: + list_for_each_entry(tgt, &vhost->targets, queue) { + if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) { + spin_unlock_irqrestore(vhost->host->host_lock, flags); + ibmvfc_tgt_add_rport(tgt); + return; + } + } + + if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) { + vhost->reinit = 0; + scsi_block_requests(vhost->host); + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY); + } else { + ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); + wake_up(&vhost->init_wait_q); + } + break; default: break; }; @@ -4240,56 +4117,6 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost) return -ENOMEM; } -/** - * ibmvfc_rport_add_thread - Worker thread for rport adds - * @work: work struct - * - **/ -static void ibmvfc_rport_add_thread(struct work_struct *work) -{ - struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host, - rport_add_work_q); - struct ibmvfc_target *tgt; - struct fc_rport *rport; - unsigned long flags; - int did_work; - - ENTER; - spin_lock_irqsave(vhost->host->host_lock, flags); - do { - did_work = 0; - if (vhost->state != IBMVFC_ACTIVE) - break; - - list_for_each_entry(tgt, &vhost->targets, queue) { - if (tgt->add_rport) { - did_work = 1; - tgt->add_rport = 0; - kref_get(&tgt->kref); - rport = tgt->rport; - if (!rport) { - spin_unlock_irqrestore(vhost->host->host_lock, flags); - ibmvfc_tgt_add_rport(tgt); - } else if (get_device(&rport->dev)) { - spin_unlock_irqrestore(vhost->host->host_lock, flags); - tgt_dbg(tgt, "Setting rport roles\n"); - fc_remote_port_rolechg(rport, tgt->ids.roles); - put_device(&rport->dev); - } - - kref_put(&tgt->kref, ibmvfc_release_tgt); - spin_lock_irqsave(vhost->host->host_lock, flags); - break; - } - } - } while(did_work); - - if (vhost->state == IBMVFC_ACTIVE) - vhost->scan_complete = 1; - spin_unlock_irqrestore(vhost->host->host_lock, flags); - LEAVE; -} - /** * ibmvfc_probe - Adapter hot plug add entry point * @vdev: vio device struct @@ -4333,7 +4160,6 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) strcpy(vhost->partition_name, "UNKNOWN"); init_waitqueue_head(&vhost->work_wait_q); init_waitqueue_head(&vhost->init_wait_q); - INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread); if ((rc = ibmvfc_alloc_mem(vhost))) goto free_scsi_host; diff --git a/trunk/drivers/scsi/ibmvscsi/ibmvfc.h b/trunk/drivers/scsi/ibmvscsi/ibmvfc.h index c2668d7d67f5..ca1dcf7a7568 100644 --- a/trunk/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/trunk/drivers/scsi/ibmvscsi/ibmvfc.h @@ -29,8 +29,8 @@ #include "viosrp.h" #define IBMVFC_NAME "ibmvfc" -#define IBMVFC_DRIVER_VERSION "1.0.6" -#define IBMVFC_DRIVER_DATE "(May 28, 2009)" +#define IBMVFC_DRIVER_VERSION "1.0.5" +#define IBMVFC_DRIVER_DATE "(March 19, 2009)" #define IBMVFC_DEFAULT_TIMEOUT 60 #define IBMVFC_ADISC_CANCEL_TIMEOUT 45 @@ -57,10 +57,9 @@ * Ensure we have resources for ERP and initialization: * 1 for ERP * 1 for initialization - * 1 for NPIV Logout * 2 for each discovery thread */ -#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + 1 + (disc_threads * 2)) +#define IBMVFC_NUM_INTERNAL_REQ (1 + 1 + (disc_threads * 2)) #define IBMVFC_MAD_SUCCESS 0x00 #define IBMVFC_MAD_NOT_SUPPORTED 0xF1 @@ -128,7 +127,6 @@ enum ibmvfc_mad_types { IBMVFC_IMPLICIT_LOGOUT = 0x0040, IBMVFC_PASSTHRU = 0x0200, IBMVFC_TMF_MAD = 0x0100, - IBMVFC_NPIV_LOGOUT = 0x0800, }; struct ibmvfc_mad_common { @@ -145,10 +143,6 @@ struct 
ibmvfc_npiv_login_mad { struct srp_direct_buf buffer; }__attribute__((packed, aligned (8))); -struct ibmvfc_npiv_logout_mad { - struct ibmvfc_mad_common common; -}__attribute__((packed, aligned (8))); - #define IBMVFC_MAX_NAME 256 struct ibmvfc_npiv_login { @@ -207,8 +201,7 @@ struct ibmvfc_npiv_login_resp { #define IBMVFC_NATIVE_FC 0x01 #define IBMVFC_CAN_FLUSH_ON_HALT 0x08 u32 reserved; - u64 capabilities; -#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 + u64 capabilites; u32 max_cmds; u32 scsi_id_sz; u64 max_dma_len; @@ -548,17 +541,9 @@ struct ibmvfc_crq_queue { dma_addr_t msg_token; }; -enum ibmvfc_ae_link_state { - IBMVFC_AE_LS_LINK_UP = 0x01, - IBMVFC_AE_LS_LINK_BOUNCED = 0x02, - IBMVFC_AE_LS_LINK_DOWN = 0x04, - IBMVFC_AE_LS_LINK_DEAD = 0x08, -}; - struct ibmvfc_async_crq { volatile u8 valid; - u8 link_state; - u8 pad[2]; + u8 pad[3]; u32 pad2; volatile u64 event; volatile u64 scsi_id; @@ -576,7 +561,6 @@ struct ibmvfc_async_crq_queue { union ibmvfc_iu { struct ibmvfc_mad_common mad_common; struct ibmvfc_npiv_login_mad npiv_login; - struct ibmvfc_npiv_logout_mad npiv_logout; struct ibmvfc_discover_targets discover_targets; struct ibmvfc_port_login plogi; struct ibmvfc_process_login prli; @@ -591,6 +575,7 @@ enum ibmvfc_target_action { IBMVFC_TGT_ACTION_NONE = 0, IBMVFC_TGT_ACTION_INIT, IBMVFC_TGT_ACTION_INIT_WAIT, + IBMVFC_TGT_ACTION_ADD_RPORT, IBMVFC_TGT_ACTION_DEL_RPORT, }; @@ -603,7 +588,6 @@ struct ibmvfc_target { int target_id; enum ibmvfc_target_action action; int need_login; - int add_rport; int init_retries; u32 cancel_key; struct ibmvfc_service_parms service_parms; @@ -643,8 +627,6 @@ struct ibmvfc_event_pool { enum ibmvfc_host_action { IBMVFC_HOST_ACTION_NONE = 0, - IBMVFC_HOST_ACTION_LOGO, - IBMVFC_HOST_ACTION_LOGO_WAIT, IBMVFC_HOST_ACTION_INIT, IBMVFC_HOST_ACTION_INIT_WAIT, IBMVFC_HOST_ACTION_QUERY, @@ -653,6 +635,7 @@ enum ibmvfc_host_action { IBMVFC_HOST_ACTION_ALLOC_TGTS, IBMVFC_HOST_ACTION_TGT_INIT, IBMVFC_HOST_ACTION_TGT_DEL_FAILED, + IBMVFC_HOST_ACTION_TGT_ADD, }; enum ibmvfc_host_state { @@ -699,8 +682,6 @@ struct ibmvfc_host { int client_migrated; int reinit; int delay_init; - int scan_complete; - int logged_in; int events_to_log; #define IBMVFC_AE_LINKUP 0x0001 #define IBMVFC_AE_LINKDOWN 0x0002 @@ -711,7 +692,6 @@ struct ibmvfc_host { void (*job_step) (struct ibmvfc_host *); struct task_struct *work_thread; struct tasklet_struct tasklet; - struct work_struct rport_add_work_q; wait_queue_head_t init_wait_q; wait_queue_head_t work_wait_q; }; @@ -727,12 +707,6 @@ struct ibmvfc_host { #define tgt_err(t, fmt, ...) \ dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__) -#define tgt_log(t, level, fmt, ...) \ - do { \ - if ((t)->vhost->log_level >= level) \ - tgt_err(t, fmt, ##__VA_ARGS__); \ - } while (0) - #define ibmvfc_dbg(vhost, ...) 
\ DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__)) diff --git a/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c b/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c index 11d2602ae88e..c9aa7611e408 100644 --- a/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -70,7 +70,6 @@ #include #include #include -#include #include #include #include @@ -88,15 +87,9 @@ */ static int max_id = 64; static int max_channel = 3; -static int init_timeout = 300; -static int login_timeout = 60; -static int info_timeout = 30; -static int abort_timeout = 60; -static int reset_timeout = 60; +static int init_timeout = 5; static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; -static int fast_fail = 1; -static int client_reserve = 1; static struct scsi_transport_template *ibmvscsi_transport_template; @@ -117,10 +110,6 @@ module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds"); module_param_named(max_requests, max_requests, int, S_IRUGO); MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter"); -module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]"); -module_param_named(client_reserve, client_reserve, int, S_IRUGO ); -MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release"); /* ------------------------------------------------------------ * Routines for the event pool and event structs @@ -792,53 +781,105 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, /* ------------------------------------------------------------ * Routines for driver initialization */ - /** - * map_persist_bufs: - Pre-map persistent data for adapter logins - * @hostdata: ibmvscsi_host_data of host + * adapter_info_rsp: - Handle response to MAD adapter info request + * @evt_struct: srp_event_struct with the response * - * Map the capabilities and adapter info DMA buffers to avoid runtime failures. - * Return 1 on error, 0 on success. - */ -static int map_persist_bufs(struct ibmvscsi_host_data *hostdata) + * Used as a "done" callback by when sending adapter_info. 
Gets called + * by ibmvscsi_handle_crq() +*/ +static void adapter_info_rsp(struct srp_event_struct *evt_struct) { + struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; + dma_unmap_single(hostdata->dev, + evt_struct->iu.mad.adapter_info.buffer, + evt_struct->iu.mad.adapter_info.common.length, + DMA_BIDIRECTIONAL); - hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps, - sizeof(hostdata->caps), DMA_BIDIRECTIONAL); - - if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) { - dev_err(hostdata->dev, "Unable to map capabilities buffer!\n"); - return 1; - } - - hostdata->adapter_info_addr = dma_map_single(hostdata->dev, - &hostdata->madapter_info, - sizeof(hostdata->madapter_info), - DMA_BIDIRECTIONAL); - if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) { - dev_err(hostdata->dev, "Unable to map adapter info buffer!\n"); - dma_unmap_single(hostdata->dev, hostdata->caps_addr, - sizeof(hostdata->caps), DMA_BIDIRECTIONAL); - return 1; + if (evt_struct->xfer_iu->mad.adapter_info.common.status) { + dev_err(hostdata->dev, "error %d getting adapter info\n", + evt_struct->xfer_iu->mad.adapter_info.common.status); + } else { + dev_info(hostdata->dev, "host srp version: %s, " + "host partition %s (%d), OS %d, max io %u\n", + hostdata->madapter_info.srp_version, + hostdata->madapter_info.partition_name, + hostdata->madapter_info.partition_number, + hostdata->madapter_info.os_type, + hostdata->madapter_info.port_max_txu[0]); + + if (hostdata->madapter_info.port_max_txu[0]) + hostdata->host->max_sectors = + hostdata->madapter_info.port_max_txu[0] >> 9; + + if (hostdata->madapter_info.os_type == 3 && + strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { + dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n", + hostdata->madapter_info.srp_version); + dev_err(hostdata->dev, "limiting scatterlists to %d\n", + MAX_INDIRECT_BUFS); + hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; + } } - - return 0; } /** - * unmap_persist_bufs: - Unmap persistent data needed for adapter logins - * @hostdata: ibmvscsi_host_data of host - * - * Unmap the capabilities and adapter info DMA buffers - */ -static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata) + * send_mad_adapter_info: - Sends the mad adapter info request + * and stores the result so it can be retrieved with + * sysfs. We COULD consider causing a failure if the + * returned SRP version doesn't match ours. + * @hostdata: ibmvscsi_host_data of host + * + * Returns zero if successful. 
+*/ +static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) { - dma_unmap_single(hostdata->dev, hostdata->caps_addr, - sizeof(hostdata->caps), DMA_BIDIRECTIONAL); + struct viosrp_adapter_info *req; + struct srp_event_struct *evt_struct; + unsigned long flags; + dma_addr_t addr; - dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr, - sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL); -} + evt_struct = get_event_struct(&hostdata->pool); + if (!evt_struct) { + dev_err(hostdata->dev, + "couldn't allocate an event for ADAPTER_INFO_REQ!\n"); + return; + } + + init_event_struct(evt_struct, + adapter_info_rsp, + VIOSRP_MAD_FORMAT, + init_timeout); + + req = &evt_struct->iu.mad.adapter_info; + memset(req, 0x00, sizeof(*req)); + + req->common.type = VIOSRP_ADAPTER_INFO_TYPE; + req->common.length = sizeof(hostdata->madapter_info); + req->buffer = addr = dma_map_single(hostdata->dev, + &hostdata->madapter_info, + sizeof(hostdata->madapter_info), + DMA_BIDIRECTIONAL); + + if (dma_mapping_error(hostdata->dev, req->buffer)) { + if (!firmware_has_feature(FW_FEATURE_CMO)) + dev_err(hostdata->dev, + "Unable to map request_buffer for " + "adapter_info!\n"); + free_event_struct(&hostdata->pool, evt_struct); + return; + } + + spin_lock_irqsave(hostdata->host->host_lock, flags); + if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) { + dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n"); + dma_unmap_single(hostdata->dev, + addr, + sizeof(hostdata->madapter_info), + DMA_BIDIRECTIONAL); + } + spin_unlock_irqrestore(hostdata->host->host_lock, flags); +}; /** * login_rsp: - Handle response to SRP login request @@ -868,7 +909,9 @@ static void login_rsp(struct srp_event_struct *evt_struct) } dev_info(hostdata->dev, "SRP_LOGIN succeeded\n"); - hostdata->client_migrated = 0; + + if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0) + dev_err(hostdata->dev, "Invalid request_limit.\n"); /* Now we know what the real request-limit is. * This value is set rather than added to request_limit because @@ -879,12 +922,15 @@ static void login_rsp(struct srp_event_struct *evt_struct) /* If we had any pending I/Os, kick them */ scsi_unblock_requests(hostdata->host); + + send_mad_adapter_info(hostdata); + return; } /** * send_srp_login: - Sends the srp login * @hostdata: ibmvscsi_host_data of host - * + * * Returns zero if successful. 
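Both send_mad_adapter_info() and send_srp_login() in this hunk wrap ibmvscsi_send_srp_event() in spin_lock_irqsave() on the Scsi_Host lock, the usual form when the same lock may also be taken from interrupt context. A minimal sketch of the idiom (illustrative names, kernel build assumed):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void demo_send(void)
    {
            unsigned long flags;

            /* disables local interrupts and records their prior state */
            spin_lock_irqsave(&demo_lock, flags);
            /* ... hand the request to the transport while locked ... */
            spin_unlock_irqrestore(&demo_lock, flags);
    }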
*/ static int send_srp_login(struct ibmvscsi_host_data *hostdata) @@ -893,17 +939,22 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) unsigned long flags; struct srp_login_req *login; struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool); + if (!evt_struct) { + dev_err(hostdata->dev, "couldn't allocate an event for login req!\n"); + return FAILED; + } - BUG_ON(!evt_struct); - init_event_struct(evt_struct, login_rsp, - VIOSRP_SRP_FORMAT, login_timeout); + init_event_struct(evt_struct, + login_rsp, + VIOSRP_SRP_FORMAT, + init_timeout); login = &evt_struct->iu.srp.login_req; - memset(login, 0, sizeof(*login)); + memset(login, 0x00, sizeof(struct srp_login_req)); login->opcode = SRP_LOGIN_REQ; login->req_it_iu_len = sizeof(union srp_iu); login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; - + spin_lock_irqsave(hostdata->host->host_lock, flags); /* Start out with a request limit of 0, since this is negotiated in * the login request we are just sending and login requests always @@ -911,240 +962,12 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) */ atomic_set(&hostdata->request_limit, 0); - rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2); + rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); spin_unlock_irqrestore(hostdata->host->host_lock, flags); dev_info(hostdata->dev, "sent SRP login\n"); return rc; }; -/** - * capabilities_rsp: - Handle response to MAD adapter capabilities request - * @evt_struct: srp_event_struct with the response - * - * Used as a "done" callback by when sending adapter_info. - */ -static void capabilities_rsp(struct srp_event_struct *evt_struct) -{ - struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; - - if (evt_struct->xfer_iu->mad.capabilities.common.status) { - dev_err(hostdata->dev, "error 0x%X getting capabilities info\n", - evt_struct->xfer_iu->mad.capabilities.common.status); - } else { - if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP) - dev_info(hostdata->dev, "Partition migration not supported\n"); - - if (client_reserve) { - if (hostdata->caps.reserve.common.server_support == - SERVER_SUPPORTS_CAP) - dev_info(hostdata->dev, "Client reserve enabled\n"); - else - dev_info(hostdata->dev, "Client reserve not supported\n"); - } - } - - send_srp_login(hostdata); -} - -/** - * send_mad_capabilities: - Sends the mad capabilities request - * and stores the result so it can be retrieved with - * @hostdata: ibmvscsi_host_data of host - */ -static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) -{ - struct viosrp_capabilities *req; - struct srp_event_struct *evt_struct; - unsigned long flags; - struct device_node *of_node = hostdata->dev->archdata.of_node; - const char *location; - - evt_struct = get_event_struct(&hostdata->pool); - BUG_ON(!evt_struct); - - init_event_struct(evt_struct, capabilities_rsp, - VIOSRP_MAD_FORMAT, info_timeout); - - req = &evt_struct->iu.mad.capabilities; - memset(req, 0, sizeof(*req)); - - hostdata->caps.flags = CAP_LIST_SUPPORTED; - if (hostdata->client_migrated) - hostdata->caps.flags |= CLIENT_MIGRATED; - - strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), - sizeof(hostdata->caps.name)); - hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0'; - - location = of_get_property(of_node, "ibm,loc-code", NULL); - location = location ? 
location : dev_name(hostdata->dev); - strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); - hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0'; - - req->common.type = VIOSRP_CAPABILITIES_TYPE; - req->buffer = hostdata->caps_addr; - - hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES; - hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration); - hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP; - hostdata->caps.migration.ecl = 1; - - if (client_reserve) { - hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES; - hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve); - hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP; - hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2; - req->common.length = sizeof(hostdata->caps); - } else - req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve); - - spin_lock_irqsave(hostdata->host->host_lock, flags); - if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) - dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n"); - spin_unlock_irqrestore(hostdata->host->host_lock, flags); -}; - -/** - * fast_fail_rsp: - Handle response to MAD enable fast fail - * @evt_struct: srp_event_struct with the response - * - * Used as a "done" callback by when sending enable fast fail. Gets called - * by ibmvscsi_handle_crq() - */ -static void fast_fail_rsp(struct srp_event_struct *evt_struct) -{ - struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; - u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status; - - if (status == VIOSRP_MAD_NOT_SUPPORTED) - dev_err(hostdata->dev, "fast_fail not supported in server\n"); - else if (status == VIOSRP_MAD_FAILED) - dev_err(hostdata->dev, "fast_fail request failed\n"); - else if (status != VIOSRP_MAD_SUCCESS) - dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status); - - send_mad_capabilities(hostdata); -} - -/** - * init_host - Start host initialization - * @hostdata: ibmvscsi_host_data of host - * - * Returns zero if successful. - */ -static int enable_fast_fail(struct ibmvscsi_host_data *hostdata) -{ - int rc; - unsigned long flags; - struct viosrp_fast_fail *fast_fail_mad; - struct srp_event_struct *evt_struct; - - if (!fast_fail) { - send_mad_capabilities(hostdata); - return 0; - } - - evt_struct = get_event_struct(&hostdata->pool); - BUG_ON(!evt_struct); - - init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout); - - fast_fail_mad = &evt_struct->iu.mad.fast_fail; - memset(fast_fail_mad, 0, sizeof(*fast_fail_mad)); - fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL; - fast_fail_mad->common.length = sizeof(*fast_fail_mad); - - spin_lock_irqsave(hostdata->host->host_lock, flags); - rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); - spin_unlock_irqrestore(hostdata->host->host_lock, flags); - return rc; -} - -/** - * adapter_info_rsp: - Handle response to MAD adapter info request - * @evt_struct: srp_event_struct with the response - * - * Used as a "done" callback by when sending adapter_info. 
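The deleted send_mad_capabilities() copies the host name and location code with strncpy() and then stores a NUL in the last byte, because strncpy() leaves the destination unterminated whenever the source fills it. A self-contained userspace demonstration of why that second step matters (hypothetical strings):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char loc[8];
            const char *src = "U8203.E4A.06A12345";   /* longer than loc */

            strncpy(loc, src, sizeof(loc));    /* copies 8 bytes, no NUL */
            loc[sizeof(loc) - 1] = '\0';       /* force termination */
            printf("%s\n", loc);               /* prints "U8203.E" */
            return 0;
    }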
Gets called - * by ibmvscsi_handle_crq() -*/ -static void adapter_info_rsp(struct srp_event_struct *evt_struct) -{ - struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; - - if (evt_struct->xfer_iu->mad.adapter_info.common.status) { - dev_err(hostdata->dev, "error %d getting adapter info\n", - evt_struct->xfer_iu->mad.adapter_info.common.status); - } else { - dev_info(hostdata->dev, "host srp version: %s, " - "host partition %s (%d), OS %d, max io %u\n", - hostdata->madapter_info.srp_version, - hostdata->madapter_info.partition_name, - hostdata->madapter_info.partition_number, - hostdata->madapter_info.os_type, - hostdata->madapter_info.port_max_txu[0]); - - if (hostdata->madapter_info.port_max_txu[0]) - hostdata->host->max_sectors = - hostdata->madapter_info.port_max_txu[0] >> 9; - - if (hostdata->madapter_info.os_type == 3 && - strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { - dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n", - hostdata->madapter_info.srp_version); - dev_err(hostdata->dev, "limiting scatterlists to %d\n", - MAX_INDIRECT_BUFS); - hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; - } - } - - enable_fast_fail(hostdata); -} - -/** - * send_mad_adapter_info: - Sends the mad adapter info request - * and stores the result so it can be retrieved with - * sysfs. We COULD consider causing a failure if the - * returned SRP version doesn't match ours. - * @hostdata: ibmvscsi_host_data of host - * - * Returns zero if successful. -*/ -static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) -{ - struct viosrp_adapter_info *req; - struct srp_event_struct *evt_struct; - unsigned long flags; - - evt_struct = get_event_struct(&hostdata->pool); - BUG_ON(!evt_struct); - - init_event_struct(evt_struct, - adapter_info_rsp, - VIOSRP_MAD_FORMAT, - info_timeout); - - req = &evt_struct->iu.mad.adapter_info; - memset(req, 0x00, sizeof(*req)); - - req->common.type = VIOSRP_ADAPTER_INFO_TYPE; - req->common.length = sizeof(hostdata->madapter_info); - req->buffer = hostdata->adapter_info_addr; - - spin_lock_irqsave(hostdata->host->host_lock, flags); - if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) - dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n"); - spin_unlock_irqrestore(hostdata->host->host_lock, flags); -}; - -/** - * init_adapter: Start virtual adapter initialization sequence - * - */ -static void init_adapter(struct ibmvscsi_host_data *hostdata) -{ - send_mad_adapter_info(hostdata); -} - /** * sync_completion: Signal that a synchronous command has completed * Note that after returning from this call, the evt_struct is freed. 
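The sync_completion() helper referenced here is the "done" half of the kernel completion pattern used by the error handlers below: the issuing thread initializes a completion, sends the request, and sleeps in wait_for_completion() until the response path calls complete(). A minimal sketch, assuming a kernel build and illustrative names:

    #include <linux/completion.h>

    static struct completion demo_done;

    /* response side, e.g. invoked from the CRQ handler */
    static void demo_response(void)
    {
            complete(&demo_done);
    }

    /* issuing side */
    static void demo_issue_and_wait(void)
    {
            init_completion(&demo_done);
            /* ... queue the request to the adapter ... */
            wait_for_completion(&demo_done);   /* sleeps until complete() */
    }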
@@ -1206,7 +1029,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) init_event_struct(evt, sync_completion, VIOSRP_SRP_FORMAT, - abort_timeout); + init_timeout); tsk_mgmt = &evt->iu.srp.tsk_mgmt; @@ -1220,7 +1043,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) evt->sync_srp = &srp_rsp; init_completion(&evt->comp); - rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2); + rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) break; @@ -1329,7 +1152,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) init_event_struct(evt, sync_completion, VIOSRP_SRP_FORMAT, - reset_timeout); + init_timeout); tsk_mgmt = &evt->iu.srp.tsk_mgmt; @@ -1342,7 +1165,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) evt->sync_srp = &srp_rsp; init_completion(&evt->comp); - rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2); + rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2); if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY) break; @@ -1458,7 +1281,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, if ((rc = ibmvscsi_ops->send_crq(hostdata, 0xC002000000000000LL, 0)) == 0) { /* Now login */ - init_adapter(hostdata); + send_srp_login(hostdata); } else { dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc); } @@ -1468,7 +1291,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, dev_info(hostdata->dev, "partner initialization complete\n"); /* Now login */ - init_adapter(hostdata); + send_srp_login(hostdata); break; default: dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format); @@ -1480,7 +1303,6 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq, if (crq->format == 0x06) { /* We need to re-setup the interpartition connection */ dev_info(hostdata->dev, "Re-enabling adapter!\n"); - hostdata->client_migrated = 1; purge_requests(hostdata, DID_REQUEUE); if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue, hostdata)) || @@ -1575,7 +1397,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, init_event_struct(evt_struct, sync_completion, VIOSRP_MAD_FORMAT, - info_timeout); + init_timeout); host_config = &evt_struct->iu.mad.host_config; @@ -1597,7 +1419,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, init_completion(&evt_struct->comp); spin_lock_irqsave(hostdata->host->host_lock, flags); - rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); + rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2); spin_unlock_irqrestore(hostdata->host->host_lock, flags); if (rc == 0) wait_for_completion(&evt_struct->comp); @@ -1622,7 +1444,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev) spin_lock_irqsave(shost->host_lock, lock_flags); if (sdev->type == TYPE_DISK) { sdev->allow_restart = 1; - blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); + blk_queue_rq_timeout(sdev->request_queue, 60 * HZ); } scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); spin_unlock_irqrestore(shost->host_lock, lock_flags); @@ -1649,46 +1471,6 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) /* ------------------------------------------------------------ * sysfs attributes */ -static ssize_t show_host_vhost_loc(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct ibmvscsi_host_data *hostdata = shost_priv(shost); - int len; - - len = snprintf(buf, sizeof(hostdata->caps.loc), 
"%s\n", - hostdata->caps.loc); - return len; -} - -static struct device_attribute ibmvscsi_host_vhost_loc = { - .attr = { - .name = "vhost_loc", - .mode = S_IRUGO, - }, - .show = show_host_vhost_loc, -}; - -static ssize_t show_host_vhost_name(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct Scsi_Host *shost = class_to_shost(dev); - struct ibmvscsi_host_data *hostdata = shost_priv(shost); - int len; - - len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n", - hostdata->caps.name); - return len; -} - -static struct device_attribute ibmvscsi_host_vhost_name = { - .attr = { - .name = "vhost_name", - .mode = S_IRUGO, - }, - .show = show_host_vhost_name, -}; - static ssize_t show_host_srp_version(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1812,8 +1594,6 @@ static struct device_attribute ibmvscsi_host_config = { }; static struct device_attribute *ibmvscsi_attrs[] = { - &ibmvscsi_host_vhost_loc, - &ibmvscsi_host_vhost_name, &ibmvscsi_host_srp_version, &ibmvscsi_host_partition_name, &ibmvscsi_host_partition_number, @@ -1894,11 +1674,6 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) atomic_set(&hostdata->request_limit, -1); hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT; - if (map_persist_bufs(hostdata)) { - dev_err(&vdev->dev, "couldn't map persistent buffers\n"); - goto persist_bufs_failed; - } - rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events); if (rc != 0 && rc != H_RESOURCE) { dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc); @@ -1912,7 +1687,6 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) host->max_lun = 8; host->max_id = max_id; host->max_channel = max_channel; - host->max_cmd_len = 16; if (scsi_add_host(hostdata->host, hostdata->dev)) goto add_host_failed; @@ -1959,8 +1733,6 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) init_pool_failed: ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); init_crq_failed: - unmap_persist_bufs(hostdata); - persist_bufs_failed: scsi_host_put(host); scsi_host_alloc_failed: return -1; @@ -1969,7 +1741,6 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id) static int ibmvscsi_remove(struct vio_dev *vdev) { struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data; - unmap_persist_bufs(hostdata); release_event_pool(&hostdata->pool, hostdata); ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events); diff --git a/trunk/drivers/scsi/ibmvscsi/ibmvscsi.h b/trunk/drivers/scsi/ibmvscsi/ibmvscsi.h index 76425303def0..2d4339d5e16e 100644 --- a/trunk/drivers/scsi/ibmvscsi/ibmvscsi.h +++ b/trunk/drivers/scsi/ibmvscsi/ibmvscsi.h @@ -90,7 +90,6 @@ struct event_pool { /* all driver data associated with a host adapter */ struct ibmvscsi_host_data { atomic_t request_limit; - int client_migrated; struct device *dev; struct event_pool pool; struct crq_queue queue; @@ -98,9 +97,6 @@ struct ibmvscsi_host_data { struct list_head sent; struct Scsi_Host *host; struct mad_adapter_info_data madapter_info; - struct capabilities caps; - dma_addr_t caps_addr; - dma_addr_t adapter_info_addr; }; /* routines for managing a command/response queue */ diff --git a/trunk/drivers/scsi/ibmvscsi/viosrp.h b/trunk/drivers/scsi/ibmvscsi/viosrp.h index 2cd735d1d196..204604501ad8 100644 --- a/trunk/drivers/scsi/ibmvscsi/viosrp.h +++ b/trunk/drivers/scsi/ibmvscsi/viosrp.h @@ -37,7 +37,6 @@ #define SRP_VERSION "16.a" #define 
SRP_MAX_IU_LEN 256 -#define SRP_MAX_LOC_LEN 32 union srp_iu { struct srp_login_req login_req; @@ -87,37 +86,7 @@ enum viosrp_mad_types { VIOSRP_EMPTY_IU_TYPE = 0x01, VIOSRP_ERROR_LOG_TYPE = 0x02, VIOSRP_ADAPTER_INFO_TYPE = 0x03, - VIOSRP_HOST_CONFIG_TYPE = 0x04, - VIOSRP_CAPABILITIES_TYPE = 0x05, - VIOSRP_ENABLE_FAST_FAIL = 0x08, -}; - -enum viosrp_mad_status { - VIOSRP_MAD_SUCCESS = 0x00, - VIOSRP_MAD_NOT_SUPPORTED = 0xF1, - VIOSRP_MAD_FAILED = 0xF7, -}; - -enum viosrp_capability_type { - MIGRATION_CAPABILITIES = 0x01, - RESERVATION_CAPABILITIES = 0x02, -}; - -enum viosrp_capability_support { - SERVER_DOES_NOT_SUPPORTS_CAP = 0x0, - SERVER_SUPPORTS_CAP = 0x01, - SERVER_CAP_DATA = 0x02, -}; - -enum viosrp_reserve_type { - CLIENT_RESERVE_SCSI_2 = 0x01, -}; - -enum viosrp_capability_flag { - CLIENT_MIGRATED = 0x01, - CLIENT_RECONNECT = 0x02, - CAP_LIST_SUPPORTED = 0x04, - CAP_LIST_DATA = 0x08, + VIOSRP_HOST_CONFIG_TYPE = 0x04 }; /* @@ -158,46 +127,11 @@ struct viosrp_host_config { u64 buffer; }; -struct viosrp_fast_fail { - struct mad_common common; -}; - -struct viosrp_capabilities { - struct mad_common common; - u64 buffer; -}; - -struct mad_capability_common { - u32 cap_type; - u16 length; - u16 server_support; -}; - -struct mad_reserve_cap { - struct mad_capability_common common; - u32 type; -}; - -struct mad_migration_cap { - struct mad_capability_common common; - u32 ecl; -}; - -struct capabilities{ - u32 flags; - char name[SRP_MAX_LOC_LEN]; - char loc[SRP_MAX_LOC_LEN]; - struct mad_migration_cap migration; - struct mad_reserve_cap reserve; -}; - union mad_iu { struct viosrp_empty_iu empty_iu; struct viosrp_error_log error_log; struct viosrp_adapter_info adapter_info; struct viosrp_host_config host_config; - struct viosrp_fast_fail fast_fail; - struct viosrp_capabilities capabilities; }; union viosrp_iu { diff --git a/trunk/drivers/scsi/ipr.c b/trunk/drivers/scsi/ipr.c index 0f8bc772b112..dd689ded8609 100644 --- a/trunk/drivers/scsi/ipr.c +++ b/trunk/drivers/scsi/ipr.c @@ -7003,7 +7003,6 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev) ioa_cfg->sdt_state = ABORT_DUMP; ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; ioa_cfg->in_ioa_bringdown = 1; - ioa_cfg->allow_cmds = 0; ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); } @@ -7689,7 +7688,7 @@ static void __ipr_remove(struct pci_dev *pdev) * Return value: * none **/ -static void __devexit ipr_remove(struct pci_dev *pdev) +static void ipr_remove(struct pci_dev *pdev) { struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); @@ -7865,7 +7864,7 @@ static struct pci_driver ipr_driver = { .name = IPR_NAME, .id_table = ipr_pci_table, .probe = ipr_probe, - .remove = __devexit_p(ipr_remove), + .remove = ipr_remove, .shutdown = ipr_shutdown, .err_handler = &ipr_err_handler, }; diff --git a/trunk/drivers/scsi/libfc/fc_exch.c b/trunk/drivers/scsi/libfc/fc_exch.c index 7af9bceb8aa9..992af05aacf1 100644 --- a/trunk/drivers/scsi/libfc/fc_exch.c +++ b/trunk/drivers/scsi/libfc/fc_exch.c @@ -1159,10 +1159,6 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) atomic_inc(&mp->stats.xid_not_found); goto out; } - if (ep->esb_stat & ESB_ST_COMPLETE) { - atomic_inc(&mp->stats.xid_not_found); - goto out; - } if (ep->rxid == FC_XID_UNKNOWN) ep->rxid = ntohs(fh->fh_rx_id); if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { diff --git a/trunk/drivers/scsi/libfc/fc_fcp.c b/trunk/drivers/scsi/libfc/fc_fcp.c index ad8b747837b0..521f996f9b13 100644 --- 
a/trunk/drivers/scsi/libfc/fc_fcp.c +++ b/trunk/drivers/scsi/libfc/fc_fcp.c @@ -1896,7 +1896,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status; break; case FC_CMD_ABORTED: - sc_cmd->result = (DID_ERROR << 16) | fsp->io_status; + sc_cmd->result = (DID_ABORT << 16) | fsp->io_status; break; case FC_CMD_TIME_OUT: sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status; diff --git a/trunk/drivers/scsi/libfc/fc_rport.c b/trunk/drivers/scsi/libfc/fc_rport.c index 7bfbff7e0efb..747d73c5c8af 100644 --- a/trunk/drivers/scsi/libfc/fc_rport.c +++ b/trunk/drivers/scsi/libfc/fc_rport.c @@ -478,7 +478,7 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp) if (PTR_ERR(fp) == -FC_EX_CLOSED) return fc_rport_error(rport, fp); - if (rdata->retries < rdata->local_port->max_rport_retry_count) { + if (rdata->retries < rdata->local_port->max_retry_count) { FC_DEBUG_RPORT("Error %ld in state %s, retrying\n", PTR_ERR(fp), fc_rport_state(rport)); rdata->retries++; @@ -1330,7 +1330,7 @@ int fc_rport_init(struct fc_lport *lport) } EXPORT_SYMBOL(fc_rport_init); -int fc_setup_rport(void) +int fc_setup_rport() { rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); if (!rport_event_queue) @@ -1339,7 +1339,7 @@ int fc_setup_rport(void) } EXPORT_SYMBOL(fc_setup_rport); -void fc_destroy_rport(void) +void fc_destroy_rport() { destroy_workqueue(rport_event_queue); } diff --git a/trunk/drivers/scsi/libiscsi.c b/trunk/drivers/scsi/libiscsi.c index 59908aead531..e72b4ad47d35 100644 --- a/trunk/drivers/scsi/libiscsi.c +++ b/trunk/drivers/scsi/libiscsi.c @@ -81,8 +81,7 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn) struct Scsi_Host *shost = conn->session->host; struct iscsi_host *ihost = shost_priv(shost); - if (ihost->workq) - queue_work(ihost->workq, &conn->xmitwork); + queue_work(ihost->workq, &conn->xmitwork); } EXPORT_SYMBOL_GPL(iscsi_conn_queue_work); @@ -110,9 +109,11 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) * if the window closed with IO queued, then kick the * xmit thread */ - if (!list_empty(&session->leadconn->cmdqueue) || - !list_empty(&session->leadconn->mgmtqueue)) - iscsi_conn_queue_work(session->leadconn); + if (!list_empty(&session->leadconn->xmitqueue) || + !list_empty(&session->leadconn->mgmtqueue)) { + if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD)) + iscsi_conn_queue_work(session->leadconn); + } } } EXPORT_SYMBOL_GPL(iscsi_update_cmdsn); @@ -256,11 +257,9 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) itt_t itt; int rc; - if (conn->session->tt->alloc_pdu) { - rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); - if (rc) - return rc; - } + rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD); + if (rc) + return rc; hdr = (struct iscsi_cmd *) task->hdr; itt = hdr->itt; memset(hdr, 0, sizeof(*hdr)); @@ -365,6 +364,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) return -EIO; task->state = ISCSI_TASK_RUNNING; + list_move_tail(&task->running, &conn->run_list); conn->scsicmd_pdus_cnt++; ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x " @@ -380,25 +380,26 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task) } /** - * iscsi_free_task - free a task + * iscsi_complete_command - finish a task * @task: iscsi cmd task * * Must be called with session lock. * This function returns the scsi command to scsi-ml or cleans * up mgmt tasks then returns the task to the pool. 
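One detail worth flagging in the fc_rport.c hunk above: `int fc_setup_rport(void)` becomes `int fc_setup_rport()`. Before C23 an empty parameter list declares a function with unspecified arguments rather than no arguments, so the compiler stops checking calls against it. A small standalone illustration:

    /* compile with e.g. gcc -std=c89 empty_parens.c */
    int with_void(void) { return 0; }
    int no_proto()      { return 0; }   /* "unspecified args" pre-C23 */

    int main(void)
    {
            /* with_void(1); */         /* error: too many arguments */
            no_proto(1, 2, 3);          /* accepted without a diagnostic */
            return 0;
    }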
*/ -static void iscsi_free_task(struct iscsi_task *task) +static void iscsi_complete_command(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct iscsi_session *session = conn->session; struct scsi_cmnd *sc = task->sc; - ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n", - task->itt, task->state, task->sc); - session->tt->cleanup_task(task); - task->state = ISCSI_TASK_FREE; + list_del_init(&task->running); + task->state = ISCSI_TASK_COMPLETED; task->sc = NULL; + + if (conn->task == task) + conn->task = NULL; /* * login task is preallocated so do not free */ @@ -407,6 +408,9 @@ static void iscsi_free_task(struct iscsi_task *task) __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*)); + if (conn->ping_task == task) + conn->ping_task = NULL; + if (sc) { task->sc = NULL; /* SCSI eh reuses commands to verify us */ @@ -429,7 +433,7 @@ EXPORT_SYMBOL_GPL(__iscsi_get_task); static void __iscsi_put_task(struct iscsi_task *task) { if (atomic_dec_and_test(&task->refcount)) - iscsi_free_task(task); + iscsi_complete_command(task); } void iscsi_put_task(struct iscsi_task *task) @@ -442,74 +446,26 @@ void iscsi_put_task(struct iscsi_task *task) } EXPORT_SYMBOL_GPL(iscsi_put_task); -/** - * iscsi_complete_task - finish a task - * @task: iscsi cmd task - * @state: state to complete task with - * - * Must be called with session lock. - */ -static void iscsi_complete_task(struct iscsi_task *task, int state) -{ - struct iscsi_conn *conn = task->conn; - - ISCSI_DBG_SESSION(conn->session, - "complete task itt 0x%x state %d sc %p\n", - task->itt, task->state, task->sc); - if (task->state == ISCSI_TASK_COMPLETED || - task->state == ISCSI_TASK_ABRT_TMF || - task->state == ISCSI_TASK_ABRT_SESS_RECOV) - return; - WARN_ON_ONCE(task->state == ISCSI_TASK_FREE); - task->state = state; - - if (!list_empty(&task->running)) - list_del_init(&task->running); - - if (conn->task == task) - conn->task = NULL; - - if (conn->ping_task == task) - conn->ping_task = NULL; - - /* release get from queueing */ - __iscsi_put_task(task); -} - /* - * session lock must be held and if not called for a task that is - * still pending or from the xmit thread, then xmit thread must - * be suspended. + * session lock must be held */ -static void fail_scsi_task(struct iscsi_task *task, int err) +static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task, + int err) { - struct iscsi_conn *conn = task->conn; struct scsi_cmnd *sc; - int state; - /* - * if a command completes and we get a successful tmf response - * we will hit this because the scsi eh abort code does not take - * a ref to the task. 
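Replacing iscsi_complete_task() with the older __iscsi_put_task() path keeps the same ownership rule: whoever drops the last reference frees the task. The dec-and-test shape, modeled here in portable C11 rather than the kernel's atomic_t API:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct task {
            atomic_int refcount;
    };

    static void put_task(struct task *t)
    {
            /* fetch_sub returns the old value; 1 means we were last */
            if (atomic_fetch_sub(&t->refcount, 1) == 1) {
                    printf("last reference dropped, freeing\n");
                    free(t);
            }
    }

    int main(void)
    {
            struct task *t = malloc(sizeof(*t));

            if (!t)
                    return 1;
            atomic_init(&t->refcount, 2);   /* e.g. queueing ref + completion ref */
            put_task(t);                    /* one reference still held */
            put_task(t);                    /* frees */
            return 0;
    }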
- */ sc = task->sc; if (!sc) return; - if (task->state == ISCSI_TASK_PENDING) { + if (task->state == ISCSI_TASK_PENDING) /* * cmd never made it to the xmit thread, so we should not count * the cmd in the sequencing */ conn->session->queued_cmdsn--; - /* it was never sent so just complete like normal */ - state = ISCSI_TASK_COMPLETED; - } else if (err == DID_TRANSPORT_DISRUPTED) - state = ISCSI_TASK_ABRT_SESS_RECOV; - else - state = ISCSI_TASK_ABRT_TMF; - sc->result = err << 16; + sc->result = err; if (!scsi_bidi_cmnd(sc)) scsi_set_resid(sc, scsi_bufflen(sc)); else { @@ -517,7 +473,10 @@ static void fail_scsi_task(struct iscsi_task *task, int err) scsi_in(sc)->resid = scsi_in(sc)->length; } - iscsi_complete_task(task, state); + if (conn->task == task) + conn->task = NULL; + /* release ref from queuecommand */ + __iscsi_put_task(task); } static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, @@ -557,6 +516,7 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, session->state = ISCSI_STATE_LOGGING_OUT; task->state = ISCSI_TASK_RUNNING; + list_move_tail(&task->running, &conn->mgmt_run_list); ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x " "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt, task->data_count); @@ -568,7 +528,6 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, char *data, uint32_t data_size) { struct iscsi_session *session = conn->session; - struct iscsi_host *ihost = shost_priv(session->host); struct iscsi_task *task; itt_t itt; @@ -585,9 +544,6 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, */ task = conn->login_task; else { - if (session->state != ISCSI_STATE_LOGGED_IN) - return NULL; - BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); @@ -603,8 +559,6 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, atomic_set(&task->refcount, 1); task->conn = conn; task->sc = NULL; - INIT_LIST_HEAD(&task->running); - task->state = ISCSI_TASK_PENDING; if (data_size) { memcpy(task->data, data, data_size); @@ -612,14 +566,11 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, } else task->data_count = 0; - if (conn->session->tt->alloc_pdu) { - if (conn->session->tt->alloc_pdu(task, hdr->opcode)) { - iscsi_conn_printk(KERN_ERR, conn, "Could not allocate " - "pdu for mgmt task.\n"); - goto free_task; - } + if (conn->session->tt->alloc_pdu(task, hdr->opcode)) { + iscsi_conn_printk(KERN_ERR, conn, "Could not allocate " + "pdu for mgmt task.\n"); + goto requeue_task; } - itt = task->hdr->itt; task->hdr_len = sizeof(struct iscsi_hdr); memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr)); @@ -632,22 +583,30 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, task->conn->session->age); } - if (!ihost->workq) { + INIT_LIST_HEAD(&task->running); + list_add_tail(&task->running, &conn->mgmtqueue); + + if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { if (iscsi_prep_mgmt_task(conn, task)) goto free_task; if (session->tt->xmit_task(task)) goto free_task; - } else { - list_add_tail(&task->running, &conn->mgmtqueue); + + } else iscsi_conn_queue_work(conn); - } return task; free_task: __iscsi_put_task(task); return NULL; + +requeue_task: + if (task != conn->login_task) + __kfifo_put(session->cmdpool.queue, (void*)&task, + sizeof(void*)); + return NULL; } int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr, @@ -742,10 +701,11 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr 
*hdr, sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; } out: - ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n", + ISCSI_DBG_SESSION(session, "done [sc %p res %d itt 0x%x]\n", sc, sc->result, task->itt); conn->scsirsp_pdus_cnt++; - iscsi_complete_task(task, ISCSI_TASK_COMPLETED); + + __iscsi_put_task(task); } /** @@ -764,7 +724,6 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS)) return; - iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr); sc->result = (DID_OK << 16) | rhdr->cmd_status; conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1; if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW | @@ -779,11 +738,8 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status; } - ISCSI_DBG_SESSION(conn->session, "data in with status done " - "[sc %p res %d itt 0x%x]\n", - sc, sc->result, task->itt); conn->scsirsp_pdus_cnt++; - iscsi_complete_task(task, ISCSI_TASK_COMPLETED); + __iscsi_put_task(task); } static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr) @@ -867,7 +823,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr, * * The session lock must be held. */ -struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) +static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) { struct iscsi_session *session = conn->session; int i; @@ -884,7 +840,6 @@ struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt) return session->cmds[i]; } -EXPORT_SYMBOL_GPL(iscsi_itt_to_task); /** * __iscsi_complete_pdu - complete pdu @@ -1004,7 +959,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, } iscsi_tmf_rsp(conn, hdr); - iscsi_complete_task(task, ISCSI_TASK_COMPLETED); + __iscsi_put_task(task); break; case ISCSI_OP_NOOP_IN: iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr); @@ -1022,7 +977,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, goto recv_pdu; mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout); - iscsi_complete_task(task, ISCSI_TASK_COMPLETED); + __iscsi_put_task(task); break; default: rc = ISCSI_ERR_BAD_OPCODE; @@ -1034,7 +989,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, recv_pdu: if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen)) rc = ISCSI_ERR_CONN_FAILED; - iscsi_complete_task(task, ISCSI_TASK_COMPLETED); + __iscsi_put_task(task); return rc; } EXPORT_SYMBOL_GPL(__iscsi_complete_pdu); @@ -1211,12 +1166,7 @@ void iscsi_requeue_task(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; - /* - * this may be on the requeue list already if the xmit_task callout - * is handling the r2ts while we are adding new ones - */ - if (list_empty(&task->running)) - list_add_tail(&task->running, &conn->requeue); + list_move_tail(&task->running, &conn->requeue); iscsi_conn_queue_work(conn); } EXPORT_SYMBOL_GPL(iscsi_requeue_task); @@ -1256,7 +1206,6 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) while (!list_empty(&conn->mgmtqueue)) { conn->task = list_entry(conn->mgmtqueue.next, struct iscsi_task, running); - list_del_init(&conn->task->running); if (iscsi_prep_mgmt_task(conn, conn->task)) { __iscsi_put_task(conn->task); conn->task = NULL; @@ -1268,26 +1217,23 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) } /* process pending command queue */ - while (!list_empty(&conn->cmdqueue)) { + while (!list_empty(&conn->xmitqueue)) { if 
(conn->tmf_state == TMF_QUEUED) break; - conn->task = list_entry(conn->cmdqueue.next, + conn->task = list_entry(conn->xmitqueue.next, struct iscsi_task, running); - list_del_init(&conn->task->running); if (conn->session->state == ISCSI_STATE_LOGGING_OUT) { - fail_scsi_task(conn->task, DID_IMM_RETRY); + fail_command(conn, conn->task, DID_IMM_RETRY << 16); continue; } rc = iscsi_prep_scsi_cmd_pdu(conn->task); if (rc) { if (rc == -ENOMEM) { - list_add_tail(&conn->task->running, - &conn->cmdqueue); conn->task = NULL; goto again; } else - fail_scsi_task(conn->task, DID_ABORT); + fail_command(conn, conn->task, DID_ABORT << 16); continue; } rc = iscsi_xmit_task(conn); @@ -1314,8 +1260,8 @@ static int iscsi_data_xmit(struct iscsi_conn *conn) conn->task = list_entry(conn->requeue.next, struct iscsi_task, running); - list_del_init(&conn->task->running); conn->task->state = ISCSI_TASK_RUNNING; + list_move_tail(conn->requeue.next, &conn->run_list); rc = iscsi_xmit_task(conn); if (rc) goto again; @@ -1382,7 +1328,6 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) { struct iscsi_cls_session *cls_session; struct Scsi_Host *host; - struct iscsi_host *ihost; int reason = 0; struct iscsi_session *session; struct iscsi_conn *conn; @@ -1393,7 +1338,6 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) sc->SCp.ptr = NULL; host = sc->device->host; - ihost = shost_priv(host); spin_unlock(host->host_lock); cls_session = starget_to_session(scsi_target(sc->device)); @@ -1406,7 +1350,13 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) goto fault; } - if (session->state != ISCSI_STATE_LOGGED_IN) { + /* + * ISCSI_STATE_FAILED is a temp. state. The recovery + * code will decide what is best to do with command queued + * during this time + */ + if (session->state != ISCSI_STATE_LOGGED_IN && + session->state != ISCSI_STATE_FAILED) { /* * to handle the race between when we set the recovery state * and block the session we requeue here (commands could @@ -1414,15 +1364,12 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) * up because the block code is not locked) */ switch (session->state) { - case ISCSI_STATE_FAILED: case ISCSI_STATE_IN_RECOVERY: reason = FAILURE_SESSION_IN_RECOVERY; - sc->result = DID_IMM_RETRY << 16; - break; + goto reject; case ISCSI_STATE_LOGGING_OUT: reason = FAILURE_SESSION_LOGGING_OUT; - sc->result = DID_IMM_RETRY << 16; - break; + goto reject; case ISCSI_STATE_RECOVERY_FAILED: reason = FAILURE_SESSION_RECOVERY_TIMEOUT; sc->result = DID_TRANSPORT_FAILFAST << 16; @@ -1455,8 +1402,9 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) reason = FAILURE_OOM; goto reject; } + list_add_tail(&task->running, &conn->xmitqueue); - if (!ihost->workq) { + if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) { reason = iscsi_prep_scsi_cmd_pdu(task); if (reason) { if (reason == -ENOMEM) { @@ -1471,10 +1419,8 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) reason = FAILURE_SESSION_NOT_READY; goto prepd_reject; } - } else { - list_add_tail(&task->running, &conn->cmdqueue); + } else iscsi_conn_queue_work(conn); - } session->queued_cmdsn++; spin_unlock(&session->lock); @@ -1483,7 +1429,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) prepd_reject: sc->scsi_done = NULL; - iscsi_complete_task(task, ISCSI_TASK_COMPLETED); + iscsi_complete_command(task); reject: spin_unlock(&session->lock); 
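The iscsi_data_xmit() changes above revert to draining conn->mgmtqueue and conn->xmitqueue with the standard list.h head-removal loop. Its minimal shape (kernel build assumed, names illustrative):

    #include <linux/list.h>

    struct demo_task {
            struct list_head running;
    };

    static void demo_drain(struct list_head *queue)
    {
            struct demo_task *t;

            while (!list_empty(queue)) {
                    t = list_first_entry(queue, struct demo_task, running);
                    list_del_init(&t->running);   /* node can be requeued later */
                    /* ... prep and transmit t ... */
            }
    }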
ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", @@ -1493,7 +1439,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) prepd_fault: sc->scsi_done = NULL; - iscsi_complete_task(task, ISCSI_TASK_COMPLETED); + iscsi_complete_command(task); fault: spin_unlock(&session->lock); ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", @@ -1662,24 +1608,44 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, * Fail commands. session lock held and recv side suspended and xmit * thread flushed */ -static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun, - int error) +static void fail_all_commands(struct iscsi_conn *conn, unsigned lun, + int error) { - struct iscsi_task *task; - int i; + struct iscsi_task *task, *tmp; - for (i = 0; i < conn->session->cmds_max; i++) { - task = conn->session->cmds[i]; - if (!task->sc || task->state == ISCSI_TASK_FREE) - continue; + if (conn->task) { + if (lun == -1 || + (conn->task->sc && conn->task->sc->device->lun == lun)) + conn->task = NULL; + } - if (lun != -1 && lun != task->sc->device->lun) - continue; + /* flush pending */ + list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) { + if (lun == task->sc->device->lun || lun == -1) { + ISCSI_DBG_SESSION(conn->session, + "failing pending sc %p itt 0x%x\n", + task->sc, task->itt); + fail_command(conn, task, error << 16); + } + } - ISCSI_DBG_SESSION(conn->session, - "failing sc %p itt 0x%x state %d\n", - task->sc, task->itt, task->state); - fail_scsi_task(task, error); + list_for_each_entry_safe(task, tmp, &conn->requeue, running) { + if (lun == task->sc->device->lun || lun == -1) { + ISCSI_DBG_SESSION(conn->session, + "failing requeued sc %p itt 0x%x\n", + task->sc, task->itt); + fail_command(conn, task, error << 16); + } + } + + /* fail all other running */ + list_for_each_entry_safe(task, tmp, &conn->run_list, running) { + if (lun == task->sc->device->lun || lun == -1) { + ISCSI_DBG_SESSION(conn->session, + "failing in progress sc %p itt 0x%x\n", + task->sc, task->itt); + fail_command(conn, task, error << 16); + } } } @@ -1689,7 +1655,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn) struct iscsi_host *ihost = shost_priv(shost); set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); - if (ihost->workq) + if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) flush_workqueue(ihost->workq); } EXPORT_SYMBOL_GPL(iscsi_suspend_tx); @@ -1697,23 +1663,8 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx); static void iscsi_start_tx(struct iscsi_conn *conn) { clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); - iscsi_conn_queue_work(conn); -} - -/* - * We want to make sure a ping is in flight. It has timed out. - * And we are not busy processing a pdu that is making - * progress but got started before the ping and is taking a while - * to complete so the ping is just stuck behind it in a queue. 
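The iscsi_has_ping_timed_out() helper deleted in this hunk, like the open-coded checks that replace it, compares deadlines with time_before_eq() instead of a plain "<=" so the test stays correct across jiffies wraparound. A sketch of the same check (kernel build assumed; the real helper additionally requires a ping to actually be in flight):

    #include <linux/jiffies.h>
    #include <linux/types.h>

    static bool demo_ping_timed_out(unsigned long last_recv,
                                    unsigned long recv_timeout,
                                    unsigned long ping_timeout)
    {
            unsigned long deadline = last_recv + recv_timeout * HZ +
                                     ping_timeout * HZ;

            /* true once jiffies has passed deadline, wrap-safe */
            return time_before_eq(deadline, jiffies);
    }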
- */ -static int iscsi_has_ping_timed_out(struct iscsi_conn *conn) -{ - if (conn->ping_task && - time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + - (conn->ping_timeout * HZ), jiffies)) - return 1; - else - return 0; + if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD)) + iscsi_conn_queue_work(conn); } static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) @@ -1751,20 +1702,16 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd) * if the ping timedout then we are in the middle of cleaning up * and can let the iscsi eh handle it */ - if (iscsi_has_ping_timed_out(conn)) { + if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) + + (conn->ping_timeout * HZ), jiffies)) rc = BLK_EH_RESET_TIMER; - goto done; - } /* * if we are about to check the transport then give the command * more time */ if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ), - jiffies)) { + jiffies)) rc = BLK_EH_RESET_TIMER; - goto done; - } - /* if in the middle of checking the transport then give us more time */ if (conn->ping_task) rc = BLK_EH_RESET_TIMER; @@ -1791,13 +1738,13 @@ static void iscsi_check_transport_timeouts(unsigned long data) recv_timeout *= HZ; last_recv = conn->last_recv; - - if (iscsi_has_ping_timed_out(conn)) { + if (conn->ping_task && + time_before_eq(conn->last_ping + (conn->ping_timeout * HZ), + jiffies)) { iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs " - "expired, recv timeout %d, last rx %lu, " - "last ping %lu, now %lu\n", - conn->ping_timeout, conn->recv_timeout, - last_recv, conn->last_ping, jiffies); + "expired, last rx %lu, last ping %lu, " + "now %lu\n", conn->ping_timeout, last_recv, + conn->last_ping, jiffies); spin_unlock(&session->lock); iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); return; @@ -1841,8 +1788,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) cls_session = starget_to_session(scsi_target(sc->device)); session = cls_session->dd_data; - ISCSI_DBG_SESSION(session, "aborting sc %p\n", sc); - mutex_lock(&session->eh_mutex); spin_lock_bh(&session->lock); /* @@ -1865,8 +1810,6 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) sc->SCp.phase != session->age) { spin_unlock_bh(&session->lock); mutex_unlock(&session->eh_mutex); - ISCSI_DBG_SESSION(session, "failing abort due to dropped " - "session.\n"); return FAILED; } @@ -1886,7 +1829,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) } if (task->state == ISCSI_TASK_PENDING) { - fail_scsi_task(task, DID_ABORT); + fail_command(conn, task, DID_ABORT << 16); goto success; } @@ -1917,7 +1860,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc) * then sent more data for the cmd. 
*/ spin_lock(&session->lock); - fail_scsi_task(task, DID_ABORT); + fail_command(conn, task, DID_ABORT << 16); conn->tmf_state = TMF_INITIAL; spin_unlock(&session->lock); iscsi_start_tx(conn); @@ -2024,7 +1967,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc) iscsi_suspend_tx(conn); spin_lock_bh(&session->lock); - fail_scsi_tasks(conn, sc->device->lun, DID_ERROR); + fail_all_commands(conn, sc->device->lun, DID_ERROR); conn->tmf_state = TMF_INITIAL; spin_unlock_bh(&session->lock); @@ -2331,7 +2274,6 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost, if (cmd_task_size) task->dd_data = &task[1]; task->itt = cmd_i; - task->state = ISCSI_TASK_FREE; INIT_LIST_HEAD(&task->running); } @@ -2418,8 +2360,10 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, conn->transport_timer.data = (unsigned long)conn; conn->transport_timer.function = iscsi_check_transport_timeouts; + INIT_LIST_HEAD(&conn->run_list); + INIT_LIST_HEAD(&conn->mgmt_run_list); INIT_LIST_HEAD(&conn->mgmtqueue); - INIT_LIST_HEAD(&conn->cmdqueue); + INIT_LIST_HEAD(&conn->xmitqueue); INIT_LIST_HEAD(&conn->requeue); INIT_WORK(&conn->xmitwork, iscsi_xmitworker); @@ -2587,28 +2531,27 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn) EXPORT_SYMBOL_GPL(iscsi_conn_start); static void -fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn) +flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn) { - struct iscsi_task *task; - int i, state; + struct iscsi_task *task, *tmp; - for (i = 0; i < conn->session->cmds_max; i++) { - task = conn->session->cmds[i]; - if (task->sc) - continue; - - if (task->state == ISCSI_TASK_FREE) - continue; - - ISCSI_DBG_SESSION(conn->session, - "failing mgmt itt 0x%x state %d\n", - task->itt, task->state); - state = ISCSI_TASK_ABRT_SESS_RECOV; - if (task->state == ISCSI_TASK_PENDING) - state = ISCSI_TASK_COMPLETED; - iscsi_complete_task(task, state); + /* handle pending */ + list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) { + ISCSI_DBG_SESSION(session, "flushing pending mgmt task " + "itt 0x%x\n", task->itt); + /* release ref from prep task */ + __iscsi_put_task(task); + } + /* handle running */ + list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) { + ISCSI_DBG_SESSION(session, "flushing running mgmt task " + "itt 0x%x\n", task->itt); + /* release ref from prep task */ + __iscsi_put_task(task); } + + conn->task = NULL; } static void iscsi_start_session_recovery(struct iscsi_session *session, @@ -2616,6 +2559,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, { int old_stop_stage; + del_timer_sync(&conn->transport_timer); + mutex_lock(&session->eh_mutex); spin_lock_bh(&session->lock); if (conn->stop_stage == STOP_CONN_TERM) { @@ -2633,17 +2578,13 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, session->state = ISCSI_STATE_TERMINATE; else if (conn->stop_stage != STOP_CONN_RECOVER) session->state = ISCSI_STATE_IN_RECOVERY; - spin_unlock_bh(&session->lock); - - del_timer_sync(&conn->transport_timer); - iscsi_suspend_tx(conn); - spin_lock_bh(&session->lock); old_stop_stage = conn->stop_stage; conn->stop_stage = flag; conn->c_stage = ISCSI_CONN_STOPPED; spin_unlock_bh(&session->lock); + iscsi_suspend_tx(conn); /* * for connection level recovery we should not calculate * header digest. conn->hdr_size used for optimization @@ -2664,8 +2605,11 @@ static void iscsi_start_session_recovery(struct iscsi_session *session, * flush queues. 
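flush_control_queues() above, like fail_all_commands() before it, drops entries while iterating, which is only safe with the _safe list iterator that caches the next node before the loop body runs. Minimal shape (kernel build assumed):

    #include <linux/list.h>

    struct demo_task {
            struct list_head running;
    };

    static void demo_flush(struct list_head *queue)
    {
            struct demo_task *task, *tmp;

            /* tmp already points at the next node, so deleting task is safe */
            list_for_each_entry_safe(task, tmp, queue, running) {
                    list_del_init(&task->running);
                    /* ... fail or complete the task ... */
            }
    }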
*/ spin_lock_bh(&session->lock); - fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED); - fail_mgmt_tasks(session, conn); + if (flag == STOP_CONN_RECOVER) + fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED); + else + fail_all_commands(conn, -1, DID_ERROR); + flush_control_queues(session, conn); spin_unlock_bh(&session->lock); mutex_unlock(&session->eh_mutex); } @@ -2707,23 +2651,6 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session, } EXPORT_SYMBOL_GPL(iscsi_conn_bind); -static int iscsi_switch_str_param(char **param, char *new_val_buf) -{ - char *new_val; - - if (*param) { - if (!strcmp(*param, new_val_buf)) - return 0; - } - - new_val = kstrdup(new_val_buf, GFP_NOIO); - if (!new_val) - return -ENOMEM; - - kfree(*param); - *param = new_val; - return 0; -} int iscsi_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf, int buflen) @@ -2796,15 +2723,38 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn, sscanf(buf, "%u", &conn->exp_statsn); break; case ISCSI_PARAM_USERNAME: - return iscsi_switch_str_param(&session->username, buf); + kfree(session->username); + session->username = kstrdup(buf, GFP_KERNEL); + if (!session->username) + return -ENOMEM; + break; case ISCSI_PARAM_USERNAME_IN: - return iscsi_switch_str_param(&session->username_in, buf); + kfree(session->username_in); + session->username_in = kstrdup(buf, GFP_KERNEL); + if (!session->username_in) + return -ENOMEM; + break; case ISCSI_PARAM_PASSWORD: - return iscsi_switch_str_param(&session->password, buf); + kfree(session->password); + session->password = kstrdup(buf, GFP_KERNEL); + if (!session->password) + return -ENOMEM; + break; case ISCSI_PARAM_PASSWORD_IN: - return iscsi_switch_str_param(&session->password_in, buf); + kfree(session->password_in); + session->password_in = kstrdup(buf, GFP_KERNEL); + if (!session->password_in) + return -ENOMEM; + break; case ISCSI_PARAM_TARGET_NAME: - return iscsi_switch_str_param(&session->targetname, buf); + /* this should not change between logins */ + if (session->targetname) + break; + + session->targetname = kstrdup(buf, GFP_KERNEL); + if (!session->targetname) + return -ENOMEM; + break; case ISCSI_PARAM_TPGT: sscanf(buf, "%d", &session->tpgt); break; @@ -2812,11 +2762,25 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn, sscanf(buf, "%d", &conn->persistent_port); break; case ISCSI_PARAM_PERSISTENT_ADDRESS: - return iscsi_switch_str_param(&conn->persistent_address, buf); + /* + * this is the address returned in discovery so it should + * not change between logins. 
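The iscsi_switch_str_param() helper removed at the top of this hunk is exactly what the reinstated kfree()/kstrdup() pairs open-code: duplicate first, free the old buffer only on success, and skip the churn when the value is unchanged. Its logic as a self-contained userspace equivalent, with strdup() standing in for kstrdup(..., GFP_NOIO):

    #include <stdlib.h>
    #include <string.h>

    static int switch_str_param(char **param, const char *new_val_buf)
    {
            char *new_val;

            if (*param && !strcmp(*param, new_val_buf))
                    return 0;           /* unchanged, keep the old buffer */

            new_val = strdup(new_val_buf);
            if (!new_val)
                    return -1;          /* kernel version returns -ENOMEM */

            free(*param);               /* free(NULL) is a no-op */
            *param = new_val;
            return 0;
    }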
+ */ + if (conn->persistent_address) + break; + + conn->persistent_address = kstrdup(buf, GFP_KERNEL); + if (!conn->persistent_address) + return -ENOMEM; + break; case ISCSI_PARAM_IFACE_NAME: - return iscsi_switch_str_param(&session->ifacename, buf); + if (!session->ifacename) + session->ifacename = kstrdup(buf, GFP_KERNEL); + break; case ISCSI_PARAM_INITIATOR_NAME: - return iscsi_switch_str_param(&session->initiatorname, buf); + if (!session->initiatorname) + session->initiatorname = kstrdup(buf, GFP_KERNEL); + break; default: return -ENOSYS; } @@ -2887,7 +2851,10 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session, len = sprintf(buf, "%s\n", session->ifacename); break; case ISCSI_PARAM_INITIATOR_NAME: - len = sprintf(buf, "%s\n", session->initiatorname); + if (!session->initiatorname) + len = sprintf(buf, "%s\n", "unknown"); + else + len = sprintf(buf, "%s\n", session->initiatorname); break; default: return -ENOSYS; @@ -2953,16 +2920,29 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: - len = sprintf(buf, "%s\n", ihost->netdev); + if (!ihost->netdev) + len = sprintf(buf, "%s\n", "default"); + else + len = sprintf(buf, "%s\n", ihost->netdev); break; case ISCSI_HOST_PARAM_HWADDRESS: - len = sprintf(buf, "%s\n", ihost->hwaddress); + if (!ihost->hwaddress) + len = sprintf(buf, "%s\n", "default"); + else + len = sprintf(buf, "%s\n", ihost->hwaddress); break; case ISCSI_HOST_PARAM_INITIATOR_NAME: - len = sprintf(buf, "%s\n", ihost->initiatorname); + if (!ihost->initiatorname) + len = sprintf(buf, "%s\n", "unknown"); + else + len = sprintf(buf, "%s\n", ihost->initiatorname); break; case ISCSI_HOST_PARAM_IPADDRESS: - len = sprintf(buf, "%s\n", ihost->local_address); + if (!strlen(ihost->local_address)) + len = sprintf(buf, "%s\n", "unknown"); + else + len = sprintf(buf, "%s\n", + ihost->local_address); break; default: return -ENOSYS; @@ -2979,11 +2959,17 @@ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param, switch (param) { case ISCSI_HOST_PARAM_NETDEV_NAME: - return iscsi_switch_str_param(&ihost->netdev, buf); + if (!ihost->netdev) + ihost->netdev = kstrdup(buf, GFP_KERNEL); + break; case ISCSI_HOST_PARAM_HWADDRESS: - return iscsi_switch_str_param(&ihost->hwaddress, buf); + if (!ihost->hwaddress) + ihost->hwaddress = kstrdup(buf, GFP_KERNEL); + break; case ISCSI_HOST_PARAM_INITIATOR_NAME: - return iscsi_switch_str_param(&ihost->initiatorname, buf); + if (!ihost->initiatorname) + ihost->initiatorname = kstrdup(buf, GFP_KERNEL); + break; default: return -ENOSYS; } diff --git a/trunk/drivers/scsi/libiscsi_tcp.c b/trunk/drivers/scsi/libiscsi_tcp.c index 2bc07090321d..b579ca9f4836 100644 --- a/trunk/drivers/scsi/libiscsi_tcp.c +++ b/trunk/drivers/scsi/libiscsi_tcp.c @@ -440,8 +440,8 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task) struct iscsi_tcp_task *tcp_task = task->dd_data; struct iscsi_r2t_info *r2t; - /* nothing to do for mgmt */ - if (!task->sc) + /* nothing to do for mgmt or pending tasks */ + if (!task->sc || task->state == ISCSI_TASK_PENDING) return; /* flush task's r2t queues */ @@ -473,13 +473,7 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task) int datasn = be32_to_cpu(rhdr->datasn); unsigned total_in_length = scsi_in(task->sc)->length; - /* - * lib iscsi will update this in the completion handling if there - * is status. 
- */ - if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS)) - iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr); - + iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr); if (tcp_conn->in.datalen == 0) return 0; @@ -863,12 +857,6 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb, int rc = 0; ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset); - /* - * Update for each skb instead of pdu, because over slow networks a - * data_in's data could take a while to read in. We also want to - * account for r2ts. - */ - conn->last_recv = jiffies; if (unlikely(conn->suspend_rx)) { ISCSI_DBG_TCP(conn, "Rx suspended!\n"); diff --git a/trunk/drivers/scsi/lpfc/lpfc.h b/trunk/drivers/scsi/lpfc/lpfc.h index 540569849099..1105f9a111ba 100644 --- a/trunk/drivers/scsi/lpfc/lpfc.h +++ b/trunk/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -23,13 +23,6 @@ struct lpfc_sli2_slim; -#define LPFC_PCI_DEV_LP 0x1 -#define LPFC_PCI_DEV_OC 0x2 - -#define LPFC_SLI_REV2 2 -#define LPFC_SLI_REV3 3 -#define LPFC_SLI_REV4 4 - #define LPFC_MAX_TARGET 4096 /* max number of targets supported */ #define LPFC_MAX_DISC_THREADS 64 /* max outstanding discovery els requests */ @@ -105,11 +98,9 @@ struct lpfc_dma_pool { }; struct hbq_dmabuf { - struct lpfc_dmabuf hbuf; struct lpfc_dmabuf dbuf; uint32_t size; uint32_t tag; - struct lpfc_rcqe rcqe; }; /* Priority bit. Set value to exceed low water mark in lpfc_mem. 
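The lpfc_vpd hunk just below shows why the driver declares every hardware bitfield twice: the C standard leaves bit-field layout to the ABI, so the members must be listed in opposite orders under __BIG_ENDIAN_BITFIELD and __LITTLE_ENDIAN_BITFIELD for both builds to overlay the same 32-bit word. The idiom in isolation (illustrative field names, kernel build assumed):

    #include <asm/byteorder.h>
    #include <linux/types.h>

    struct demo_feat {
    #ifdef __BIG_ENDIAN_BITFIELD
            __u32 rsvd : 30;        /* high-order bits declared first */
            __u32 ccrp :  1;
            __u32 cmv  :  1;
    #else   /* __LITTLE_ENDIAN_BITFIELD */
            __u32 cmv  :  1;        /* low-order bits declared first */
            __u32 ccrp :  1;
            __u32 rsvd : 30;
    #endif
    };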
*/ @@ -143,10 +134,7 @@ typedef struct lpfc_vpd { } rev; struct { #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd3 :19; /* Reserved */ - uint32_t cdss : 1; /* Configure Data Security SLI */ - uint32_t rsvd2 : 3; /* Reserved */ - uint32_t cbg : 1; /* Configure BlockGuard */ + uint32_t rsvd2 :24; /* Reserved */ uint32_t cmv : 1; /* Configure Max VPIs */ uint32_t ccrp : 1; /* Config Command Ring Polling */ uint32_t csah : 1; /* Configure Synchronous Abort Handling */ @@ -164,10 +152,7 @@ typedef struct lpfc_vpd { uint32_t csah : 1; /* Configure Synchronous Abort Handling */ uint32_t ccrp : 1; /* Config Command Ring Polling */ uint32_t cmv : 1; /* Configure Max VPIs */ - uint32_t cbg : 1; /* Configure BlockGuard */ - uint32_t rsvd2 : 3; /* Reserved */ - uint32_t cdss : 1; /* Configure Data Security SLI */ - uint32_t rsvd3 :19; /* Reserved */ + uint32_t rsvd2 :24; /* Reserved */ #endif } sli3Feat; } lpfc_vpd_t; @@ -279,8 +264,8 @@ enum hba_state { }; struct lpfc_vport { - struct lpfc_hba *phba; struct list_head listentry; + struct lpfc_hba *phba; uint8_t port_type; #define LPFC_PHYSICAL_PORT 1 #define LPFC_NPIV_PORT 2 @@ -288,9 +273,6 @@ struct lpfc_vport { enum discovery_state port_state; uint16_t vpi; - uint16_t vfi; - uint8_t vfi_state; -#define LPFC_VFI_REGISTERED 0x1 uint32_t fc_flag; /* FC flags */ /* Several of these flags are HBA centric and should be moved to @@ -403,9 +385,6 @@ struct lpfc_vport { #endif uint8_t stat_data_enabled; uint8_t stat_data_blocked; - struct list_head rcv_buffer_list; - uint32_t vport_flag; -#define STATIC_VPORT 1 }; struct hbq_s { @@ -441,66 +420,8 @@ enum intr_type_t { }; struct lpfc_hba { - /* SCSI interface function jump table entries */ - int (*lpfc_new_scsi_buf) - (struct lpfc_vport *, int); - struct lpfc_scsi_buf * (*lpfc_get_scsi_buf) - (struct lpfc_hba *); - int (*lpfc_scsi_prep_dma_buf) - (struct lpfc_hba *, struct lpfc_scsi_buf *); - void (*lpfc_scsi_unprep_dma_buf) - (struct lpfc_hba *, struct lpfc_scsi_buf *); - void (*lpfc_release_scsi_buf) - (struct lpfc_hba *, struct lpfc_scsi_buf *); - void (*lpfc_rampdown_queue_depth) - (struct lpfc_hba *); - void (*lpfc_scsi_prep_cmnd) - (struct lpfc_vport *, struct lpfc_scsi_buf *, - struct lpfc_nodelist *); - int (*lpfc_scsi_prep_task_mgmt_cmd) - (struct lpfc_vport *, struct lpfc_scsi_buf *, - unsigned int, uint8_t); - - /* IOCB interface function jump table entries */ - int (*__lpfc_sli_issue_iocb) - (struct lpfc_hba *, uint32_t, - struct lpfc_iocbq *, uint32_t); - void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *, - struct lpfc_iocbq *); - int (*lpfc_hba_down_post)(struct lpfc_hba *phba); - - - IOCB_t * (*lpfc_get_iocb_from_iocbq) - (struct lpfc_iocbq *); - void (*lpfc_scsi_cmd_iocb_cmpl) - (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); - - /* MBOX interface function jump table entries */ - int (*lpfc_sli_issue_mbox) - (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); - /* Slow-path IOCB process function jump table entries */ - void (*lpfc_sli_handle_slow_ring_event) - (struct lpfc_hba *phba, struct lpfc_sli_ring *pring, - uint32_t mask); - /* INIT device interface function jump table entries */ - int (*lpfc_sli_hbq_to_firmware) - (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *); - int (*lpfc_sli_brdrestart) - (struct lpfc_hba *); - int (*lpfc_sli_brdready) - (struct lpfc_hba *, uint32_t); - void (*lpfc_handle_eratt) - (struct lpfc_hba *); - void (*lpfc_stop_port) - (struct lpfc_hba *); - - - /* SLI4 specific HBA data structure */ - struct lpfc_sli4_hba sli4_hba; - struct lpfc_sli sli; - 
uint8_t pci_dev_grp; /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */ - uint32_t sli_rev; /* SLI2, SLI3, or SLI4 */ + uint32_t sli_rev; /* SLI2 or SLI3 */ uint32_t sli3_options; /* Mask of enabled SLI3 options */ #define LPFC_SLI3_HBQ_ENABLED 0x01 #define LPFC_SLI3_NPIV_ENABLED 0x02 @@ -508,7 +429,6 @@ struct lpfc_hba { #define LPFC_SLI3_CRP_ENABLED 0x08 #define LPFC_SLI3_INB_ENABLED 0x10 #define LPFC_SLI3_BG_ENABLED 0x20 -#define LPFC_SLI3_DSS_ENABLED 0x40 uint32_t iocb_cmd_size; uint32_t iocb_rsp_size; @@ -522,13 +442,8 @@ struct lpfc_hba { uint32_t hba_flag; /* hba generic flags */ #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */ -#define DEFER_ERATT 0x2 /* Deferred error attention in progress */ -#define HBA_FCOE_SUPPORT 0x4 /* HBA function supports FCOE */ -#define HBA_RECEIVE_BUFFER 0x8 /* Rcv buffer posted to worker thread */ -#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ -#define FCP_XRI_ABORT_EVENT 0x20 -#define ELS_XRI_ABORT_EVENT 0x40 -#define ASYNC_EVENT 0x80 + +#define DEFER_ERATT 0x4 /* Deferred error attention in progress */ struct lpfc_dmabuf slim2p; MAILBOX_t *mbox; @@ -587,9 +502,6 @@ struct lpfc_hba { uint32_t cfg_poll; uint32_t cfg_poll_tmo; uint32_t cfg_use_msi; - uint32_t cfg_fcp_imax; - uint32_t cfg_fcp_wq_count; - uint32_t cfg_fcp_eq_count; uint32_t cfg_sg_seg_cnt; uint32_t cfg_prot_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; @@ -599,8 +511,6 @@ struct lpfc_hba { uint32_t cfg_enable_hba_reset; uint32_t cfg_enable_hba_heartbeat; uint32_t cfg_enable_bg; - uint32_t cfg_enable_fip; - uint32_t cfg_log_verbose; lpfc_vpd_t vpd; /* vital product data */ @@ -616,12 +526,11 @@ struct lpfc_hba { unsigned long data_flags; uint32_t hbq_in_use; /* HBQs in use flag */ - struct list_head rb_pend_list; /* Received buffers to be processed */ + struct list_head hbqbuf_in_list; /* in-fly hbq buffer list */ uint32_t hbq_count; /* Count of configured HBQs */ struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */ unsigned long pci_bar0_map; /* Physical address for PCI BAR0 */ - unsigned long pci_bar1_map; /* Physical address for PCI BAR1 */ unsigned long pci_bar2_map; /* Physical address for PCI BAR2 */ void __iomem *slim_memmap_p; /* Kernel memory mapped address for PCI BAR0 */ @@ -684,8 +593,7 @@ struct lpfc_hba { /* pci_mem_pools */ struct pci_pool *lpfc_scsi_dma_buf_pool; struct pci_pool *lpfc_mbuf_pool; - struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */ - struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */ + struct pci_pool *lpfc_hbq_pool; struct lpfc_dma_pool lpfc_mbuf_safety_pool; mempool_t *mbox_mem_pool; @@ -701,14 +609,6 @@ struct lpfc_hba { struct lpfc_vport *pport; /* physical lpfc_vport pointer */ uint16_t max_vpi; /* Maximum virtual nports */ #define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */ - uint16_t max_vports; /* - * For IOV HBAs max_vpi can change - * after a reset. max_vports is max - * number of vports present. This can - * be greater than max_vpi. 
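Because the hba_flag values above are distinct bits, a handler can consume one event without disturbing the others, and the driver takes hbalock around such updates. A small sketch of that discipline using the two flags that survive this patch:

static void demo_ack_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	if (phba->hba_flag & DEFER_ERATT) {
		phba->hba_flag &= ~DEFER_ERATT;		/* consume the event */
		phba->hba_flag |= HBA_ERATT_HANDLED;	/* mark it handled */
	}
	spin_unlock_irq(&phba->hbalock);
}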
- */ - uint16_t vpi_base; - uint16_t vfi_base; unsigned long *vpi_bmask; /* vpi allocation table */ /* Data structure used by fabric iocb scheduler */ @@ -767,11 +667,6 @@ struct lpfc_hba { /* Maximum number of events that can be outstanding at any time*/ #define LPFC_MAX_EVT_COUNT 512 atomic_t fast_event_count; - struct lpfc_fcf fcf; - uint8_t fc_map[3]; - uint8_t valid_vlan; - uint16_t vlan_id; - struct list_head fcf_conn_rec_list; }; static inline struct Scsi_Host * diff --git a/trunk/drivers/scsi/lpfc/lpfc_attr.c b/trunk/drivers/scsi/lpfc/lpfc_attr.c index d73e677201f8..c14f0cbdb125 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_attr.c +++ b/trunk/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -30,10 +30,8 @@ #include #include -#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" -#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -507,14 +505,12 @@ lpfc_issue_lip(struct Scsi_Host *shost) return -ENOMEM; memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); - pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK; - pmboxq->u.mb.mbxOwner = OWN_HOST; + pmboxq->mb.mbxCommand = MBX_DOWN_LINK; + pmboxq->mb.mbxOwner = OWN_HOST; mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2); - if ((mbxstatus == MBX_SUCCESS) && - (pmboxq->u.mb.mbxStatus == 0 || - pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) { + if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) { memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t)); lpfc_init_link(phba, pmboxq, phba->cfg_topology, phba->cfg_link_speed); @@ -793,8 +789,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba, uint32_t *mrpi, uint32_t *arpi, uint32_t *mvpi, uint32_t *avpi) { - struct lpfc_sli *psli = &phba->sli; - struct lpfc_mbx_read_config *rd_config; + struct lpfc_sli *psli = &phba->sli; LPFC_MBOXQ_t *pmboxq; MAILBOX_t *pmb; int rc = 0; @@ -805,7 +800,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba, */ if (phba->link_state < LPFC_LINK_DOWN || !phba->mbox_mem_pool || - (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) + (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) return 0; if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) @@ -816,13 +811,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba, return 0; memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); - pmb = &pmboxq->u.mb; + pmb = &pmboxq->mb; pmb->mbxCommand = MBX_READ_CONFIG; pmb->mbxOwner = OWN_HOST; pmboxq->context1 = NULL; if ((phba->pport->fc_flag & FC_OFFLINE_MODE) || - (!(psli->sli_flag & LPFC_SLI_ACTIVE))) + (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) rc = MBX_NOT_FINISHED; else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); @@ -833,37 +828,18 @@ lpfc_get_hba_info(struct lpfc_hba *phba, return 0; } - if (phba->sli_rev == LPFC_SLI_REV4) { - rd_config = &pmboxq->u.mqe.un.rd_config; - if (mrpi) - *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); - if (arpi) - *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) - - phba->sli4_hba.max_cfg_param.rpi_used; - if (mxri) - *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); - if (axri) - *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) - - phba->sli4_hba.max_cfg_param.xri_used; - if (mvpi) - *mvpi = 
bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
-		if (avpi)
-			*avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) -
-				phba->sli4_hba.max_cfg_param.vpi_used;
-	} else {
-		if (mrpi)
-			*mrpi = pmb->un.varRdConfig.max_rpi;
-		if (arpi)
-			*arpi = pmb->un.varRdConfig.avail_rpi;
-		if (mxri)
-			*mxri = pmb->un.varRdConfig.max_xri;
-		if (axri)
-			*axri = pmb->un.varRdConfig.avail_xri;
-		if (mvpi)
-			*mvpi = pmb->un.varRdConfig.max_vpi;
-		if (avpi)
-			*avpi = pmb->un.varRdConfig.avail_vpi;
-	}
+	if (mrpi)
+		*mrpi = pmb->un.varRdConfig.max_rpi;
+	if (arpi)
+		*arpi = pmb->un.varRdConfig.avail_rpi;
+	if (mxri)
+		*mxri = pmb->un.varRdConfig.max_xri;
+	if (axri)
+		*axri = pmb->un.varRdConfig.avail_xri;
+	if (mvpi)
+		*mvpi = pmb->un.varRdConfig.max_vpi;
+	if (avpi)
+		*avpi = pmb->un.varRdConfig.avail_vpi;

 	mempool_free(pmboxq, phba->mbox_mem_pool);
 	return 1;
@@ -2045,9 +2021,22 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
 # deluged with LOTS of information.
 # You can set a bit mask to record specific types of verbose messages:
-# See lpfc_logmsg.h for definitions.
+#
+# LOG_ELS		0x1	ELS events
+# LOG_DISCOVERY		0x2	Link discovery events
+# LOG_MBOX		0x4	Mailbox events
+# LOG_INIT		0x8	Initialization events
+# LOG_LINK_EVENT	0x10	Link events
+# LOG_FCP		0x40	FCP traffic history
+# LOG_NODE		0x80	Node table events
+# LOG_BG		0x200	BlockGuard events
+# LOG_MISC		0x400	Miscellaneous events
+# LOG_SLI		0x800	SLI events
+# LOG_FCP_ERROR		0x1000	Only log FCP errors
+# LOG_LIBDFC		0x2000	LIBDFC events
+# LOG_ALL_MSG		0xffff	LOG all messages
 */
-LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
+LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff,
 		       "Verbose logging bit-mask");

 /*
@@ -2277,36 +2266,6 @@ lpfc_param_init(topology, 0, 0, 6)
 static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
 		lpfc_topology_show, lpfc_topology_store);

-/**
- * lpfc_static_vport_show: Read callback function for
- *   lpfc_static_vport sysfs file.
- * @dev: Pointer to class device object.
- * @attr: device attribute structure.
- * @buf: Data buffer.
- *
- * This function is the read callback function for the
- * lpfc_static_vport sysfs file. The lpfc_static_vport
- * sysfs file reports the manageability of the vport.
- **/
-static ssize_t
-lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
-		       char *buf)
-{
-	struct Scsi_Host *shost = class_to_shost(dev);
-	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-	if (vport->vport_flag & STATIC_VPORT)
-		sprintf(buf, "1\n");
-	else
-		sprintf(buf, "0\n");
-
-	return strlen(buf);
-}
-
-/*
- * Sysfs attribute to control the statistical data collection.
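The LOG_* values documented above are or-able bits, so one mask can enable several message classes at once; 0x3, for instance, selects both ELS and link-discovery logging. A sketch of the gate such a mask drives (the demo_* shape is assumed for illustration, not copied from lpfc_logmsg.h):

static uint32_t demo_log_mask = 0x3;	/* LOG_ELS | LOG_DISCOVERY */

static void demo_log(uint32_t msg_class, const char *msg)
{
	if (demo_log_mask & msg_class)	/* is this class enabled? */
		printk(KERN_INFO "lpfc: %s\n", msg);
}

/* demo_log(0x2, "link discovery event") fires; demo_log(0x4, ...) is silent */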
- */ -static DEVICE_ATTR(lpfc_static_vport, S_IRUGO, - lpfc_static_vport_show, NULL); /** * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file @@ -2382,7 +2341,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr, if (vports == NULL) return -ENOMEM; - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { v_shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(v_shost->host_lock); /* Block and reset data collection */ @@ -2397,7 +2356,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr, phba->bucket_base = base; phba->bucket_step = step; - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { v_shost = lpfc_shost_from_vport(vports[i]); /* Unblock data collection */ @@ -2414,7 +2373,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr, if (vports == NULL) return -ENOMEM; - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { v_shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->stat_data_blocked = 1; @@ -2885,38 +2844,14 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, /* # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that # support this feature -# 0 = MSI disabled (default) +# 0 = MSI disabled # 1 = MSI enabled -# 2 = MSI-X enabled -# Value range is [0,2]. Default value is 0. +# 2 = MSI-X enabled (default) +# Value range is [0,2]. Default value is 2. */ -LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or " +LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or " "MSI-X (2), if possible"); -/* -# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second -# -# Value range is [636,651042]. Default value is 10000. -*/ -LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST, - "Set the maximum number of fast-path FCP interrupts per second"); - -/* -# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues -# -# Value range is [1,31]. Default value is 4. -*/ -LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX, - "Set the number of fast-path FCP work queues, if possible"); - -/* -# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues -# -# Value range is [1,7]. Default value is 1. -*/ -LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX, - "Set the number of fast-path FCP event queues, if possible"); - /* # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. # 0 = HBA resets disabled @@ -2941,14 +2876,6 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat."); */ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); -/* -# lpfc_enable_fip: When set, FIP is required to start discovery. If not -# set, the driver will add an FCF record manually if the port has no -# FCF records available and start discovery. -# Value range is [0,1]. 
Default value is 1 (enabled) -*/ -LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery"); - /* # lpfc_prot_mask: i @@ -3015,7 +2942,6 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_peer_port_login, &dev_attr_lpfc_nodev_tmo, &dev_attr_lpfc_devloss_tmo, - &dev_attr_lpfc_enable_fip, &dev_attr_lpfc_fcp_class, &dev_attr_lpfc_use_adisc, &dev_attr_lpfc_ack0, @@ -3043,9 +2969,6 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_poll, &dev_attr_lpfc_poll_tmo, &dev_attr_lpfc_use_msi, - &dev_attr_lpfc_fcp_imax, - &dev_attr_lpfc_fcp_wq_count, - &dev_attr_lpfc_fcp_eq_count, &dev_attr_lpfc_enable_bg, &dev_attr_lpfc_soft_wwnn, &dev_attr_lpfc_soft_wwpn, @@ -3068,7 +2991,6 @@ struct device_attribute *lpfc_vport_attrs[] = { &dev_attr_lpfc_lun_queue_depth, &dev_attr_lpfc_nodev_tmo, &dev_attr_lpfc_devloss_tmo, - &dev_attr_lpfc_enable_fip, &dev_attr_lpfc_hba_queue_depth, &dev_attr_lpfc_peer_port_login, &dev_attr_lpfc_restrict_login, @@ -3081,7 +3003,6 @@ struct device_attribute *lpfc_vport_attrs[] = { &dev_attr_lpfc_enable_da_id, &dev_attr_lpfc_max_scsicmpl_time, &dev_attr_lpfc_stat_data_ctrl, - &dev_attr_lpfc_static_vport, NULL, }; @@ -3278,7 +3199,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr, } } - memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off, + memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off, buf, count); phba->sysfs_mbox.offset = off + count; @@ -3320,7 +3241,6 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; int rc; - MAILBOX_t *pmb; if (off > MAILBOX_CMD_SIZE) return -ERANGE; @@ -3345,8 +3265,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, if (off == 0 && phba->sysfs_mbox.state == SMBOX_WRITING && phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) { - pmb = &phba->sysfs_mbox.mbox->u.mb; - switch (pmb->mbxCommand) { + + switch (phba->sysfs_mbox.mbox->mb.mbxCommand) { /* Offline only */ case MBX_INIT_LINK: case MBX_DOWN_LINK: @@ -3363,7 +3283,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, if (!(vport->fc_flag & FC_OFFLINE_MODE)) { printk(KERN_WARNING "mbox_read:Command 0x%x " "is illegal in on-line state\n", - pmb->mbxCommand); + phba->sysfs_mbox.mbox->mb.mbxCommand); sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EPERM; @@ -3399,13 +3319,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, case MBX_CONFIG_PORT: case MBX_RUN_BIU_DIAG: printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n", - pmb->mbxCommand); + phba->sysfs_mbox.mbox->mb.mbxCommand); sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EPERM; default: printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n", - pmb->mbxCommand); + phba->sysfs_mbox.mbox->mb.mbxCommand); sysfs_mbox_idle(phba); spin_unlock_irq(&phba->hbalock); return -EPERM; @@ -3415,14 +3335,14 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, * or RESTART mailbox commands until the HBA is restarted. 
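The switch above vets user-supplied mailbox opcodes in three buckets: commands permitted only while the port is offline, commands refused outright, and anything unrecognized, which is refused too. Its shape reduced to a sketch; the opcode list is abridged, and the always-permitted MBX_READ_STATUS bucket is assumed here for illustration:

static int demo_mbox_permitted(uint32_t cmd, int offline)
{
	switch (cmd) {
	case MBX_INIT_LINK:		/* offline only */
	case MBX_DOWN_LINK:
		return offline ? 0 : -EPERM;
	case MBX_READ_STATUS:		/* assumed safe in either state */
		return 0;
	case MBX_CONFIG_PORT:		/* never via sysfs */
	case MBX_RUN_BIU_DIAG:
	default:			/* unknown: refuse */
		return -EPERM;
	}
}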
*/ if (phba->pport->stopped && - pmb->mbxCommand != MBX_DUMP_MEMORY && - pmb->mbxCommand != MBX_RESTART && - pmb->mbxCommand != MBX_WRITE_VPARMS && - pmb->mbxCommand != MBX_WRITE_WWN) + phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY && + phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART && + phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS && + phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN) lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, "1259 mbox: Issued mailbox cmd " "0x%x while in stopped state.\n", - pmb->mbxCommand); + phba->sysfs_mbox.mbox->mb.mbxCommand); phba->sysfs_mbox.mbox->vport = vport; @@ -3436,7 +3356,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, } if ((vport->fc_flag & FC_OFFLINE_MODE) || - (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) { + (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){ spin_unlock_irq(&phba->hbalock); rc = lpfc_sli_issue_mbox (phba, @@ -3448,7 +3368,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, spin_unlock_irq(&phba->hbalock); rc = lpfc_sli_issue_mbox_wait (phba, phba->sysfs_mbox.mbox, - lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ); + lpfc_mbox_tmo_val(phba, + phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ); spin_lock_irq(&phba->hbalock); } @@ -3470,7 +3391,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr, return -EAGAIN; } - memcpy(buf, (uint8_t *) &pmb + off, count); + memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count); phba->sysfs_mbox.offset = off + count; @@ -3664,9 +3585,6 @@ lpfc_get_host_speed(struct Scsi_Host *shost) case LA_8GHZ_LINK: fc_host_speed(shost) = FC_PORTSPEED_8GBIT; break; - case LA_10GHZ_LINK: - fc_host_speed(shost) = FC_PORTSPEED_10GBIT; - break; default: fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; @@ -3734,7 +3652,7 @@ lpfc_get_stats(struct Scsi_Host *shost) */ if (phba->link_state < LPFC_LINK_DOWN || !phba->mbox_mem_pool || - (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0) + (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0) return NULL; if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) @@ -3745,14 +3663,14 @@ lpfc_get_stats(struct Scsi_Host *shost) return NULL; memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); - pmb = &pmboxq->u.mb; + pmb = &pmboxq->mb; pmb->mbxCommand = MBX_READ_STATUS; pmb->mbxOwner = OWN_HOST; pmboxq->context1 = NULL; pmboxq->vport = vport; if ((vport->fc_flag & FC_OFFLINE_MODE) || - (!(psli->sli_flag & LPFC_SLI_ACTIVE))) + (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); @@ -3777,7 +3695,7 @@ lpfc_get_stats(struct Scsi_Host *shost) pmboxq->vport = vport; if ((vport->fc_flag & FC_OFFLINE_MODE) || - (!(psli->sli_flag & LPFC_SLI_ACTIVE))) + (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); @@ -3851,7 +3769,7 @@ lpfc_reset_stats(struct Scsi_Host *shost) return; memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t)); - pmb = &pmboxq->u.mb; + pmb = &pmboxq->mb; pmb->mbxCommand = MBX_READ_STATUS; pmb->mbxOwner = OWN_HOST; pmb->un.varWords[0] = 0x1; /* reset request */ @@ -3859,7 +3777,7 @@ lpfc_reset_stats(struct Scsi_Host *shost) pmboxq->vport = vport; if ((vport->fc_flag & FC_OFFLINE_MODE) || - (!(psli->sli_flag & LPFC_SLI_ACTIVE))) + (!(psli->sli_flag & LPFC_SLI2_ACTIVE))) rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL); else rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2); @@ -3877,7 +3795,7 
@@ lpfc_reset_stats(struct Scsi_Host *shost)
 	pmboxq->vport = vport;

 	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
+	    (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
 		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
 	else
 		rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -4044,21 +3962,6 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
 		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
 }

-/**
- * lpfc_hba_log_verbose_init - Set hba's log verbose level
- * @phba: Pointer to lpfc_hba struct.
- *
- * This function is called by the lpfc_get_cfgparam() routine to set the
- * module lpfc_log_verbose into the @phba cfg_log_verbose for use with
- * log messages according to the module's lpfc_log_verbose parameter setting
- * before the hba port or vport is created.
- **/
-static void
-lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
-{
-	phba->cfg_log_verbose = verbose;
-}
-
 struct fc_function_template lpfc_transport_functions = {
 	/* fixed attributes the driver supports */
 	.show_host_node_name = 1,
@@ -4202,9 +4105,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
 	lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
 	lpfc_use_msi_init(phba, lpfc_use_msi);
-	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
-	lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
-	lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
 	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
 	lpfc_enable_bg_init(phba, lpfc_enable_bg);
@@ -4213,10 +4113,26 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	phba->cfg_soft_wwpn = 0L;
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
 	lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
-	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
-	lpfc_enable_fip_init(phba, lpfc_enable_fip);
-	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);

+	/*
+	 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
+	 * used to create the sg_dma_buf_pool must be dynamically calculated.
+	 * 2 segments are added since the IOCB needs a command and response bde.
+	 */
+	phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+			sizeof(struct fcp_rsp) +
+			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+
+	if (phba->cfg_enable_bg) {
+		phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
+		phba->cfg_sg_dma_buf_size +=
+			phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
+	}
+
+	/* Also reinitialize the host templates with new values.
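The sizing arithmetic added above can be checked by hand: the pool buffer holds the FCP command, the FCP response, and one BDE per data segment, plus two extra BDEs for the command and response themselves. As a worked sketch (the 32/24/12-byte sizes in the comment are illustrative, not taken from the headers):

static size_t demo_sg_dma_buf_size(uint32_t sg_seg_cnt)
{
	/* e.g. 32 + 24 + (64 + 2) * 12 = 848 bytes for 64 segments */
	return sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
	       (sg_seg_cnt + 2) * sizeof(struct ulp_bde64);
}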
*/ + lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); return; } diff --git a/trunk/drivers/scsi/lpfc/lpfc_crtn.h b/trunk/drivers/scsi/lpfc/lpfc_crtn.h index d2a922997c0f..f88ce3f26190 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_crtn.h +++ b/trunk/drivers/scsi/lpfc/lpfc_crtn.h @@ -23,8 +23,6 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *); struct fc_rport; void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *); -void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t); -int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *); void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); @@ -37,19 +35,17 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int); void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *); -int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, - LPFC_MBOXQ_t *, uint32_t); +int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, + LPFC_MBOXQ_t *, uint32_t); void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); -void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); +void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *); void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); -void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *); struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t); void lpfc_cleanup_rpis(struct lpfc_vport *, int); int lpfc_linkdown(struct lpfc_hba *); -void lpfc_linkdown_port(struct lpfc_vport *); void lpfc_port_link_failure(struct lpfc_vport *); void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *); @@ -58,7 +54,6 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *); -void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *); void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *); struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *, @@ -110,7 +105,6 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t); int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *); int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t); -int lpfc_issue_fabric_reglogin(struct lpfc_vport *); int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *); int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *, @@ -155,19 +149,15 @@ int lpfc_online(struct lpfc_hba *); void lpfc_unblock_mgmt_io(struct lpfc_hba *); void lpfc_offline_prep(struct lpfc_hba *); void lpfc_offline(struct lpfc_hba *); -void lpfc_reset_hba(struct lpfc_hba *); int lpfc_sli_setup(struct lpfc_hba *); int 
lpfc_sli_queue_setup(struct lpfc_hba *); void lpfc_handle_eratt(struct lpfc_hba *); void lpfc_handle_latt(struct lpfc_hba *); -irqreturn_t lpfc_sli_intr_handler(int, void *); -irqreturn_t lpfc_sli_sp_intr_handler(int, void *); -irqreturn_t lpfc_sli_fp_intr_handler(int, void *); -irqreturn_t lpfc_sli4_intr_handler(int, void *); -irqreturn_t lpfc_sli4_sp_intr_handler(int, void *); -irqreturn_t lpfc_sli4_fp_intr_handler(int, void *); +irqreturn_t lpfc_intr_handler(int, void *); +irqreturn_t lpfc_sp_intr_handler(int, void *); +irqreturn_t lpfc_fp_intr_handler(int, void *); void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); @@ -175,32 +165,16 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *); LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *); -void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); -int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *); -int lpfc_mbox_dev_check(struct lpfc_hba *); int lpfc_mbox_tmo_val(struct lpfc_hba *, int); -void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); -void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); -void lpfc_init_vpi(struct lpfcMboxq *, uint16_t); -void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t); -void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *); -void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t); -void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *); void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *, uint32_t , LPFC_MBOXQ_t *); struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *); void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *); -struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *); -void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *); -void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *, - uint16_t); -void lpfc_unregister_unused_fcf(struct lpfc_hba *); -int lpfc_mem_alloc(struct lpfc_hba *, int align); +int lpfc_mem_alloc(struct lpfc_hba *); void lpfc_mem_free(struct lpfc_hba *); -void lpfc_mem_free_all(struct lpfc_hba *); void lpfc_stop_vport_timers(struct lpfc_vport *); void lpfc_poll_timeout(unsigned long ptr); @@ -212,7 +186,6 @@ void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *); uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *); void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t); -void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_reset_barrier(struct lpfc_hba * phba); int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); @@ -225,13 +198,12 @@ int lpfc_sli_host_down(struct lpfc_vport *); int lpfc_sli_hba_down(struct lpfc_hba *); int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); int lpfc_sli_handle_mb_event(struct lpfc_hba *); -void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *); +int lpfc_sli_flush_mbox_queue(struct lpfc_hba *); int lpfc_sli_check_eratt(struct lpfc_hba *); -void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, +int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *, struct lpfc_sli_ring *, uint32_t); -int lpfc_sli4_handle_received_buffer(struct lpfc_hba *); void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); -int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, +int lpfc_sli_issue_iocb(struct lpfc_hba *, struct 
lpfc_sli_ring *, struct lpfc_iocbq *, uint32_t); void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); @@ -265,7 +237,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *, int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); -int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t, +int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *, struct lpfc_iocbq *, uint32_t); void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, @@ -282,12 +254,6 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *); const char* lpfc_info(struct Scsi_Host *); int lpfc_scan_finished(struct Scsi_Host *, unsigned long); -int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t); -int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t); -int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t); -int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t); -int lpfc_api_table_setup(struct lpfc_hba *, uint8_t); - void lpfc_get_cfgparam(struct lpfc_hba *); void lpfc_get_vport_cfgparam(struct lpfc_vport *); int lpfc_alloc_sysfs_attr(struct lpfc_vport *); @@ -348,15 +314,8 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *); void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *); -void lpfc_create_static_vport(struct lpfc_hba *); -void lpfc_stop_hba_timers(struct lpfc_hba *); -void lpfc_stop_port(struct lpfc_hba *); -void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t); -int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); -void lpfc_start_fdiscs(struct lpfc_hba *phba); #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) #define HBA_EVENT_RSCN 5 #define HBA_EVENT_LINK_UP 2 #define HBA_EVENT_LINK_DOWN 3 - diff --git a/trunk/drivers/scsi/lpfc/lpfc_ct.c b/trunk/drivers/scsi/lpfc/lpfc_ct.c index 1dbccfd3d022..896c7b0351e5 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_ct.c +++ b/trunk/drivers/scsi/lpfc/lpfc_ct.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -32,10 +32,8 @@ #include #include -#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" -#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -269,6 +267,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, uint32_t tmo, uint8_t retry) { struct lpfc_hba *phba = vport->phba; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; IOCB_t *icmd; struct lpfc_iocbq *geniocb; int rc; @@ -331,7 +331,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT; geniocb->vport = vport; geniocb->retry = retry; - rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0); + rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0); if (rc == IOCB_ERROR) { lpfc_sli_release_iocbq(phba, geniocb); @@ -1578,9 +1578,6 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode) case LA_8GHZ_LINK: ae->un.PortSpeed = HBA_PORTSPEED_8GBIT; break; - case LA_10GHZ_LINK: - ae->un.PortSpeed = HBA_PORTSPEED_10GBIT; - break; default: ae->un.PortSpeed = HBA_PORTSPEED_UNKNOWN; @@ -1733,7 +1730,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag) uint8_t *fwname; if (vp->rev.rBit) { - if (psli->sli_flag & LPFC_SLI_ACTIVE) + if (psli->sli_flag & LPFC_SLI2_ACTIVE) rev = vp->rev.sli2FwRev; else rev = vp->rev.sli1FwRev; @@ -1759,7 +1756,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag) } b4 = (rev & 0x0000000f); - if (psli->sli_flag & LPFC_SLI_ACTIVE) + if (psli->sli_flag & LPFC_SLI2_ACTIVE) fwname = vp->rev.sli2FwName; else fwname = vp->rev.sli1FwName; diff --git a/trunk/drivers/scsi/lpfc/lpfc_debugfs.c b/trunk/drivers/scsi/lpfc/lpfc_debugfs.c index 2b02b1fb39a0..52be5644e07a 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/trunk/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2007-2009 Emulex. All rights reserved. * + * Copyright (C) 2007-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * * @@ -33,10 +33,8 @@ #include #include -#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" -#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -282,8 +280,6 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) struct lpfc_dmabuf *d_buf; struct hbq_dmabuf *hbq_buf; - if (phba->sli_rev != 3) - return 0; cnt = LPFC_HBQINFO_SIZE; spin_lock_irq(&phba->hbalock); @@ -493,15 +489,12 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size) pring->next_cmdidx, pring->local_getidx, pring->flag, pgpp->rspPutInx, pring->numRiocb); } - - if (phba->sli_rev <= LPFC_SLI_REV3) { - word0 = readl(phba->HAregaddr); - word1 = readl(phba->CAregaddr); - word2 = readl(phba->HSregaddr); - word3 = readl(phba->HCregaddr); - len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x " - "HC:%08x\n", word0, word1, word2, word3); - } + word0 = readl(phba->HAregaddr); + word1 = readl(phba->CAregaddr); + word2 = readl(phba->HSregaddr); + word3 = readl(phba->HCregaddr); + len += snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n", + word0, word1, word2, word3); spin_unlock_irq(&phba->hbalock); return len; } diff --git a/trunk/drivers/scsi/lpfc/lpfc_disc.h b/trunk/drivers/scsi/lpfc/lpfc_disc.h index 1142070e9484..ffd108972072 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_disc.h +++ b/trunk/drivers/scsi/lpfc/lpfc_disc.h @@ -135,7 +135,6 @@ struct lpfc_nodelist { #define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ #define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ #define NLP_SC_REQ 0x20000000 /* Target requires authentication */ -#define NLP_RPI_VALID 0x80000000 /* nlp_rpi is valid */ /* ndlp usage management macros */ #define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ diff --git a/trunk/drivers/scsi/lpfc/lpfc_els.c b/trunk/drivers/scsi/lpfc/lpfc_els.c index 6bdeb14878a2..b8b34cf5c3d2 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_els.c +++ b/trunk/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -28,10 +28,8 @@ #include #include -#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" -#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -86,8 +84,7 @@ lpfc_els_chk_latt(struct lpfc_vport *vport) uint32_t ha_copy; if (vport->port_state >= LPFC_VPORT_READY || - phba->link_state == LPFC_LINK_DOWN || - phba->sli_rev > LPFC_SLI_REV3) + phba->link_state == LPFC_LINK_DOWN) return 0; /* Read the HBA Host Attention Register */ @@ -222,7 +219,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, icmd->un.elsreq64.myID = vport->fc_myDID; /* For ELS_REQUEST64_CR, use the VPI by default */ - icmd->ulpContext = vport->vpi + phba->vpi_base; + icmd->ulpContext = vport->vpi; icmd->ulpCt_h = 0; /* The CT field must be 0=INVALID_RPI for the ECHO cmd */ if (elscmd == ELS_CMD_ECHO) @@ -308,7 +305,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, * 0 - successfully issued fabric registration login for @vport * -ENXIO -- failed to issue fabric registration login for @vport **/ -int +static int lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; @@ -348,7 +345,8 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) err = 4; goto fail; } - rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0); + rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, + 0); if (rc) { err = 5; goto fail_free_mbox; @@ -387,75 +385,6 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport) return -ENXIO; } -/** - * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login - * @vport: pointer to a host virtual N_Port data structure. - * - * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for - * the @vport. This mailbox command is necessary for FCoE only. - * - * Return code - * 0 - successfully issued REG_VFI for @vport - * A failure code otherwise. 
- **/ -static int -lpfc_issue_reg_vfi(struct lpfc_vport *vport) -{ - struct lpfc_hba *phba = vport->phba; - LPFC_MBOXQ_t *mboxq; - struct lpfc_nodelist *ndlp; - struct serv_parm *sp; - struct lpfc_dmabuf *dmabuf; - int rc = 0; - - sp = &phba->fc_fabparam; - ndlp = lpfc_findnode_did(vport, Fabric_DID); - if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { - rc = -ENODEV; - goto fail; - } - - dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); - if (!dmabuf) { - rc = -ENOMEM; - goto fail; - } - dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys); - if (!dmabuf->virt) { - rc = -ENOMEM; - goto fail_free_dmabuf; - } - mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mboxq) { - rc = -ENOMEM; - goto fail_free_coherent; - } - vport->port_state = LPFC_FABRIC_CFG_LINK; - memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam)); - lpfc_reg_vfi(mboxq, vport, dmabuf->phys); - mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi; - mboxq->vport = vport; - mboxq->context1 = dmabuf; - rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - rc = -ENXIO; - goto fail_free_mbox; - } - return 0; - -fail_free_mbox: - mempool_free(mboxq, phba->mbox_mem_pool); -fail_free_coherent: - lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); -fail_free_dmabuf: - kfree(dmabuf); -fail: - lpfc_vport_set_state(vport, FC_VPORT_FAILED); - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "0289 Issue Register VFI failed: Err %d\n", rc); - return rc; -} - /** * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port * @vport: pointer to a host virtual N_Port data structure. @@ -568,24 +497,17 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } } - if (phba->sli_rev < LPFC_SLI_REV4) { - lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); - if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && - vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) - lpfc_register_new_vport(phba, vport, ndlp); - else - lpfc_issue_fabric_reglogin(vport); - } else { - ndlp->nlp_type |= NLP_FABRIC; - lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); - if (vport->vfi_state & LPFC_VFI_REGISTERED) { - lpfc_start_fdiscs(phba); - lpfc_do_scr_ns_plogi(phba, vport); - } else - lpfc_issue_reg_vfi(vport); + lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); + + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED && + vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) { + lpfc_register_new_vport(phba, vport, ndlp); + return 0; } + lpfc_issue_fabric_reglogin(vport); return 0; } + /** * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port * @vport: pointer to a host virtual N_Port data structure. 
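The deleted lpfc_issue_reg_vfi() above is a textbook instance of the kernel's staged-goto unwind: each successful allocation adds one cleanup label, and a later failure jumps to the label that releases everything acquired so far, in reverse order. Reduced to its shape, with hypothetical alloc_a()/alloc_b()/submit() helpers standing in for the mbuf, mailbox and issue steps:

extern void *alloc_a(void);
extern void *alloc_b(void);
extern void free_a(void *);
extern void free_b(void *);
extern int submit(void *, void *);

static int demo_staged_setup(void)
{
	void *a, *b;
	int rc;

	a = alloc_a();
	if (!a)
		return -ENOMEM;

	b = alloc_b();
	if (!b) {
		rc = -ENOMEM;
		goto fail_free_a;
	}

	rc = submit(a, b);	/* final step; may still fail */
	if (rc)
		goto fail_free_b;
	return 0;

fail_free_b:
	free_b(b);
fail_free_a:
	free_a(a);
	return rc;
}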
@@ -893,14 +815,9 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (sp->cmn.fcphHigh < FC_PH3) sp->cmn.fcphHigh = FC_PH3; - if (phba->sli_rev == LPFC_SLI_REV4) { - elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1); - elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1); - /* FLOGI needs to be 3 for WQE FCFI */ - /* Set the fcfi to the fcfi we registered with */ - elsiocb->iocb.ulpContext = phba->fcf.fcfi; - } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { sp->cmn.request_multiple_Nport = 1; + /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */ icmd->ulpCt_h = 1; icmd->ulpCt_l = 0; @@ -1013,8 +930,6 @@ lpfc_initial_flogi(struct lpfc_vport *vport) if (!ndlp) return 0; lpfc_nlp_init(vport, ndlp, Fabric_DID); - /* Set the node type */ - ndlp->nlp_type |= NLP_FABRIC; /* Put ndlp onto node list */ lpfc_enqueue_node(vport, ndlp); } else if (!NLP_CHK_NODE_ACT(ndlp)) { @@ -1435,12 +1350,14 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) IOCB_t *icmd; struct lpfc_nodelist *ndlp; struct lpfc_iocbq *elsiocb; + struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; int ret; psli = &phba->sli; + pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ ndlp = lpfc_findnode_did(vport, did); if (ndlp && !NLP_CHK_NODE_ACT(ndlp)) @@ -1474,7 +1391,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) phba->fc_stat.elsXmitPLOGI++; elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi; - ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); if (ret == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); @@ -1584,9 +1501,14 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, PRLI *npr; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; + struct lpfc_sli_ring *pring; + struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; + psli = &phba->sli; + pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ + cmdsize = (sizeof(uint32_t) + sizeof(PRLI)); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, ndlp->nlp_DID, ELS_CMD_PRLI); @@ -1628,8 +1550,7 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_PRLI_SND; spin_unlock_irq(shost->host_lock); - if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == - IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_PRLI_SND; spin_unlock_irq(shost->host_lock); @@ -1687,8 +1608,7 @@ lpfc_adisc_done(struct lpfc_vport *vport) * and continue discovery. 
*/
 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
-	    !(vport->fc_flag & FC_RSCN_MODE) &&
-	    (phba->sli_rev < LPFC_SLI_REV4)) {
+	    !(vport->fc_flag & FC_RSCN_MODE)) {
 		lpfc_issue_reg_vpi(phba, vport);
 		return;
 	}
@@ -1868,6 +1788,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	ADISC *ap;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
 	uint8_t *pcmd;
 	uint16_t cmdsize;

@@ -1900,8 +1822,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_ADISC_SND;
 	spin_unlock_irq(shost->host_lock);
-	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
-	    IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
 		spin_lock_irq(shost->host_lock);
 		ndlp->nlp_flag &= ~NLP_ADISC_SND;
 		spin_unlock_irq(shost->host_lock);
@@ -2016,10 +1937,15 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	struct lpfc_hba *phba = vport->phba;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
+	struct lpfc_sli_ring *pring;
+	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	int rc;

+	psli = &phba->sli;
+	pring = &psli->ring[LPFC_ELS_RING];
+
 	spin_lock_irq(shost->host_lock);
 	if (ndlp->nlp_flag & NLP_LOGO_SND) {
 		spin_unlock_irq(shost->host_lock);
@@ -2052,7 +1978,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	spin_lock_irq(shost->host_lock);
 	ndlp->nlp_flag |= NLP_LOGO_SND;
 	spin_unlock_irq(shost->host_lock);
-	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);

 	if (rc == IOCB_ERROR) {
 		spin_lock_irq(shost->host_lock);
@@ -2132,12 +2058,14 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 	struct lpfc_hba *phba = vport->phba;
 	IOCB_t *icmd;
 	struct lpfc_iocbq *elsiocb;
+	struct lpfc_sli_ring *pring;
 	struct lpfc_sli *psli;
 	uint8_t *pcmd;
 	uint16_t cmdsize;
 	struct lpfc_nodelist *ndlp;

 	psli = &phba->sli;
+	pring = &psli->ring[LPFC_ELS_RING];	/* ELS ring */
 	cmdsize = (sizeof(uint32_t) + sizeof(SCR));

 	ndlp = lpfc_findnode_did(vport, nportid);
@@ -2180,8 +2108,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)

 	phba->fc_stat.elsXmitSCR++;
 	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
-	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
-	    IOCB_ERROR) {
+	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
 		/* The additional lpfc_nlp_put will cause the following
 		 * lpfc_els_free_iocb routine to trigger the release of
 		 * the node.
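Each ELS transmit hunk above repeats one locking discipline: set the per-node send flag under host_lock, issue the iocb without the lock held, and roll the flag back (then free the iocb) if the ring refuses it, so a later retry stays possible. The ADISC case, extracted as an outline of that pattern:

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_ADISC_SND;	/* mark ADISC in flight */
	spin_unlock_irq(shost->host_lock);
	if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag &= ~NLP_ADISC_SND;	/* undo on failure */
		spin_unlock_irq(shost->host_lock);
		lpfc_els_free_iocb(phba, elsiocb);
	}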
@@ -2225,6 +2152,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) struct lpfc_hba *phba = vport->phba; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; + struct lpfc_sli_ring *pring; struct lpfc_sli *psli; FARP *fp; uint8_t *pcmd; @@ -2234,6 +2162,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) struct lpfc_nodelist *ndlp; psli = &phba->sli; + pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ cmdsize = (sizeof(uint32_t) + sizeof(FARP)); ndlp = lpfc_findnode_did(vport, nportid); @@ -2290,8 +2219,7 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry) phba->fc_stat.elsXmitFARPR++; elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd; - if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == - IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { /* The additional lpfc_nlp_put will cause the following * lpfc_els_free_iocb routine to trigger the release of * the node. @@ -3021,14 +2949,6 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; - /* - * This routine is used to register and unregister in previous SLI - * modes. - */ - if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) && - (phba->sli_rev == LPFC_SLI_REV4)) - lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); - pmb->context1 = NULL; lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); @@ -3041,7 +2961,6 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) */ lpfc_nlp_not_used(ndlp); } - return; } @@ -3251,6 +3170,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, IOCB_t *icmd; IOCB_t *oldcmd; struct lpfc_iocbq *elsiocb; + struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; @@ -3258,6 +3178,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, ELS_PKT *els_pkt_ptr; psli = &phba->sli; + pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ oldcmd = &oldiocb->iocb; switch (flag) { @@ -3345,7 +3266,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, } phba->fc_stat.elsXmitACC++; - rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -3384,12 +3305,15 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, IOCB_t *icmd; IOCB_t *oldcmd; struct lpfc_iocbq *elsiocb; + struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; int rc; psli = &phba->sli; + pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ + cmdsize = 2 * sizeof(uint32_t); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_LS_RJT); @@ -3422,7 +3346,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, phba->fc_stat.elsXmitLSRJT++; elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; - rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); @@ -3455,6 +3379,8 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) { struct lpfc_hba *phba = vport->phba; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; ADISC *ap; IOCB_t *icmd, *oldcmd; struct lpfc_iocbq *elsiocb; @@ -3496,7 +3422,7 @@ lpfc_els_rsp_adisc_acc(struct 
lpfc_vport *vport, struct lpfc_iocbq *oldiocb, phba->fc_stat.elsXmitACC++; elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; - rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -3533,12 +3459,14 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, IOCB_t *icmd; IOCB_t *oldcmd; struct lpfc_iocbq *elsiocb; + struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; int rc; psli = &phba->sli; + pring = &psli->ring[LPFC_ELS_RING]; /* ELS ring */ cmdsize = sizeof(uint32_t) + sizeof(PRLI); elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, @@ -3592,7 +3520,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, phba->fc_stat.elsXmitACC++; elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; - rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -3634,12 +3562,15 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, RNID *rn; IOCB_t *icmd, *oldcmd; struct lpfc_iocbq *elsiocb; + struct lpfc_sli_ring *pring; struct lpfc_sli *psli; uint8_t *pcmd; uint16_t cmdsize; int rc; psli = &phba->sli; + pring = &psli->ring[LPFC_ELS_RING]; + cmdsize = sizeof(uint32_t) + sizeof(uint32_t) + (2 * sizeof(struct lpfc_name)); if (format) @@ -3695,7 +3626,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, * it could be freed */ - rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; @@ -3908,9 +3839,7 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did) payload_len -= sizeof(uint32_t); switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { case RSCN_ADDRESS_FORMAT_PORT: - if ((ns_did.un.b.domain == rscn_did.un.b.domain) - && (ns_did.un.b.area == rscn_did.un.b.area) - && (ns_did.un.b.id == rscn_did.un.b.id)) + if (ns_did.un.word == rscn_did.un.word) goto return_did_out; break; case RSCN_ADDRESS_FORMAT_AREA: @@ -4371,7 +4300,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, lpfc_init_link(phba, mbox, phba->cfg_topology, phba->cfg_link_speed); - mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; + mbox->mb.un.varInitLnk.lipsr_AL_PA = 0; mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; mbox->vport = vport; rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); @@ -4511,6 +4440,8 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, static void lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; MAILBOX_t *mb; IOCB_t *icmd; RPS_RSP *rps_rsp; @@ -4520,7 +4451,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) uint16_t xri, status; uint32_t cmdsize; - mb = &pmb->u.mb; + mb = &pmb->mb; ndlp = (struct lpfc_nodelist *) pmb->context2; xri = (uint16_t) ((unsigned long)(pmb->context1)); @@ -4576,7 +4507,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ndlp->nlp_rpi); elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; phba->fc_stat.elsXmitACC++; - if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) + if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) lpfc_els_free_iocb(phba, 
elsiocb); return; } @@ -4685,6 +4616,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, IOCB_t *icmd, *oldcmd; RPL_RSP rpl_rsp; struct lpfc_iocbq *elsiocb; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; uint8_t *pcmd; elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, @@ -4721,8 +4654,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, ndlp->nlp_rpi); elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; phba->fc_stat.elsXmitACC++; - if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == - IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); return 1; } @@ -4951,10 +4883,7 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, } else { /* FAN verified - skip FLOGI */ vport->fc_myDID = vport->fc_prevDID; - if (phba->sli_rev < LPFC_SLI_REV4) - lpfc_issue_fabric_reglogin(vport); - else - lpfc_issue_reg_vfi(vport); + lpfc_issue_fabric_reglogin(vport); } } return 0; @@ -5637,10 +5566,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, dropit: if (vport && !(vport->load_flag & FC_UNLOADING)) - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "0111 Dropping received ELS cmd " + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "(%d):0111 Dropping received ELS cmd " "Data: x%x x%x x%x\n", - icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout); + vport->vpi, icmd->ulpStatus, + icmd->un.ulpWord[4], icmd->ulpTimeout); phba->fc_stat.elsRcvDrop++; } @@ -5716,9 +5646,10 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) { if (icmd->unsli3.rcvsli3.vpi == 0xffff) vport = phba->pport; - else - vport = lpfc_find_vport_by_vpid(phba, - icmd->unsli3.rcvsli3.vpi - phba->vpi_base); + else { + uint16_t vpi = icmd->unsli3.rcvsli3.vpi; + vport = lpfc_find_vport_by_vpid(phba, vpi); + } } /* If there are no BDEs associated * with this IOCB, there is nothing to do. 
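The lookup change above is the interesting part of the unsolicited-ELS hunk: SLI4 reported an absolute VPI, so the removed code subtracted the adapter-wide vpi_base before searching, while the SLI3-only code can hand the received value straight to lpfc_find_vport_by_vpid(). The removed translation, as a sketch:

static struct lpfc_vport *
demo_vport_from_rcv_vpi(struct lpfc_hba *phba, uint16_t vpi)
{
	if (vpi == 0xffff)		/* traffic for the physical port */
		return phba->pport;
	/* SLI4 VPIs were offset by vpi_base, removed with the SLI4 code */
	return lpfc_find_vport_by_vpid(phba, vpi - phba->vpi_base);
}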
@@ -5850,7 +5781,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; spin_lock_irq(shost->host_lock); vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; @@ -5887,10 +5818,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } else { if (vport == phba->pport) - if (phba->sli_rev < LPFC_SLI_REV4) - lpfc_issue_fabric_reglogin(vport); - else - lpfc_issue_reg_vfi(vport); + lpfc_issue_fabric_reglogin(vport); else lpfc_do_scr_ns_plogi(phba, vport); } @@ -5922,7 +5850,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport, mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { - lpfc_reg_vpi(vport, mbox); + lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox); mbox->vport = vport; mbox->context2 = lpfc_nlp_get(ndlp); mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport; @@ -6211,6 +6139,7 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_hba *phba = vport->phba; + struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; IOCB_t *icmd; struct lpfc_iocbq *elsiocb; uint8_t *pcmd; @@ -6240,8 +6169,7 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_LOGO_SND; spin_unlock_irq(shost->host_lock); - if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == - IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_LOGO_SND; spin_unlock_irq(shost->host_lock); @@ -6296,6 +6224,7 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) struct lpfc_iocbq *iocb; unsigned long iflags; int ret; + struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; IOCB_t *cmd; repeat: @@ -6319,7 +6248,7 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba) "Fabric sched1: ste:x%x", iocb->vport->port_state, 0, 0); - ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); + ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); if (ret == IOCB_ERROR) { iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; @@ -6465,6 +6394,7 @@ static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) { unsigned long iflags; + struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; int ready; int ret; @@ -6488,7 +6418,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb) "Fabric sched2: ste:x%x", iocb->vport->port_state, 0, 0); - ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0); + ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0); if (ret == IOCB_ERROR) { iocb->iocb_cmpl = iocb->fabric_iocb_cmpl; @@ -6594,38 +6524,3 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba) lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); } - -/** - * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort - * @phba: pointer to lpfc hba data structure. - * @axri: pointer to the els xri abort wcqe structure. - * - * This routine is invoked by the worker thread to process a SLI4 slow-path - * ELS aborted xri. 
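lpfc_register_new_vport() above shows the asynchronous mailbox idiom this file uses throughout: build the command, attach a completion handler plus the context it will need, take a node reference for the handler to drop, and submit with MBX_NOWAIT so the caller never blocks. In outline, with a plausible (assumed) error leg that releases the reference and the mailbox:

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
		mbox->vport = vport;
		mbox->context2 = lpfc_nlp_get(ndlp);	/* ref for handler */
		mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			lpfc_nlp_put(ndlp);	/* drop the handler's ref */
			mempool_free(mbox, phba->mbox_mem_pool);
		}
	}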
- **/ -void -lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba, - struct sli4_wcqe_xri_aborted *axri) -{ - uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); - struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; - unsigned long iflag = 0; - - spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag); - list_for_each_entry_safe(sglq_entry, sglq_next, - &phba->sli4_hba.lpfc_abts_els_sgl_list, list) { - if (sglq_entry->sli4_xritag == xri) { - list_del(&sglq_entry->list); - spin_unlock_irqrestore( - &phba->sli4_hba.abts_sgl_list_lock, - iflag); - spin_lock_irqsave(&phba->hbalock, iflag); - - list_add_tail(&sglq_entry->list, - &phba->sli4_hba.lpfc_sgl_list); - spin_unlock_irqrestore(&phba->hbalock, iflag); - return; - } - } - spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag); -} diff --git a/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c b/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c index 35c41ae75be2..e764ce0bf704 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -29,12 +29,10 @@ #include #include -#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_sli.h" -#include "lpfc_sli4.h" #include "lpfc_scsi.h" #include "lpfc.h" #include "lpfc_logmsg.h" @@ -275,8 +273,6 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); - - lpfc_unregister_unused_fcf(phba); } /** @@ -299,11 +295,10 @@ lpfc_alloc_fast_evt(struct lpfc_hba *phba) { ret = kzalloc(sizeof(struct lpfc_fast_path_event), GFP_ATOMIC); - if (ret) { + if (ret) atomic_inc(&phba->fast_event_count); - INIT_LIST_HEAD(&ret->work_evt.evt_listp); - ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; - } + INIT_LIST_HEAD(&ret->work_evt.evt_listp); + ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; return ret; } @@ -496,10 +491,6 @@ lpfc_work_done(struct lpfc_hba *phba) phba->work_ha = 0; spin_unlock_irq(&phba->hbalock); - /* First, try to post the next mailbox command to SLI4 device */ - if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) - lpfc_sli4_post_async_mbox(phba); - if (ha_copy & HA_ERATT) /* Handle the error attention event */ lpfc_handle_eratt(phba); @@ -510,27 +501,9 @@ lpfc_work_done(struct lpfc_hba *phba) if (ha_copy & HA_LATT) lpfc_handle_latt(phba); - /* Process SLI4 events */ - if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { - if (phba->hba_flag & FCP_XRI_ABORT_EVENT) - lpfc_sli4_fcp_xri_abort_event_proc(phba); - if (phba->hba_flag & ELS_XRI_ABORT_EVENT) - lpfc_sli4_els_xri_abort_event_proc(phba); - if (phba->hba_flag & ASYNC_EVENT) - lpfc_sli4_async_event_proc(phba); - if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) { - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER; - spin_unlock_irq(&phba->hbalock); - lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); - } - if (phba->hba_flag & HBA_RECEIVE_BUFFER) - lpfc_sli4_handle_received_buffer(phba); - } - vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 0; i <= phba->max_vports; i++) { + for(i = 0; i <= 
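The deleted lpfc_sli4_els_xri_aborted() above shows a common two-lock list migration: scan the aborted-sgl list under its own lock, unlink the match, drop that lock, then take the other lock to requeue onto the free list, never holding both at once. A compilable stand-alone model of that ordering (plain singly linked list and pthread mutexes standing in for the kernel primitives):

#include <pthread.h>
#include <stdio.h>

struct sglq { int xritag; struct sglq *next; };  /* stand-in */

static pthread_mutex_t abts_lock = PTHREAD_MUTEX_INITIALIZER; /* aborted list */
static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER; /* free list */
static struct sglq *abts_list, *free_list;

/* Move the sglq matching xri from the aborted list back to the free
 * list, mirroring the deleted handler's lock order. */
static void els_xri_aborted(int xri)
{
    pthread_mutex_lock(&abts_lock);
    for (struct sglq **pp = &abts_list; *pp; pp = &(*pp)->next) {
        if ((*pp)->xritag == xri) {
            struct sglq *sglq = *pp;
            *pp = sglq->next;                /* unlink under abts_lock */
            pthread_mutex_unlock(&abts_lock);

            pthread_mutex_lock(&free_lock);
            sglq->next = free_list;          /* requeue under free_lock */
            free_list = sglq;
            pthread_mutex_unlock(&free_lock);
            return;
        }
    }
    pthread_mutex_unlock(&abts_lock);
}

int main(void)
{
    struct sglq s = { 42, 0 };
    abts_list = &s;
    els_xri_aborted(42);
    printf("free list head xri: %d\n", free_list ? free_list->xritag : -1);
    return 0;
}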
phba->max_vpi; i++) { /* * We could have no vports in array if unloading, so if * this happens then just use the pport @@ -582,24 +555,23 @@ lpfc_work_done(struct lpfc_hba *phba) /* * Turn on Ring interrupts */ - if (phba->sli_rev <= LPFC_SLI_REV3) { - spin_lock_irq(&phba->hbalock); - control = readl(phba->HCregaddr); - if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { - lpfc_debugfs_slow_ring_trc(phba, - "WRK Enable ring: cntl:x%x hacopy:x%x", - control, ha_copy, 0); - - control |= (HC_R0INT_ENA << LPFC_ELS_RING); - writel(control, phba->HCregaddr); - readl(phba->HCregaddr); /* flush */ - } else { - lpfc_debugfs_slow_ring_trc(phba, - "WRK Ring ok: cntl:x%x hacopy:x%x", - control, ha_copy, 0); - } - spin_unlock_irq(&phba->hbalock); + spin_lock_irq(&phba->hbalock); + control = readl(phba->HCregaddr); + if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { + lpfc_debugfs_slow_ring_trc(phba, + "WRK Enable ring: cntl:x%x hacopy:x%x", + control, ha_copy, 0); + + control |= (HC_R0INT_ENA << LPFC_ELS_RING); + writel(control, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + } + else { + lpfc_debugfs_slow_ring_trc(phba, + "WRK Ring ok: cntl:x%x hacopy:x%x", + control, ha_copy, 0); } + spin_unlock_irq(&phba->hbalock); } lpfc_work_list_done(phba); } @@ -717,7 +689,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport) lpfc_can_disctmo(vport); } -void +static void lpfc_linkdown_port(struct lpfc_vport *vport) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); @@ -744,7 +716,6 @@ lpfc_linkdown(struct lpfc_hba *phba) if (phba->link_state == LPFC_LINK_DOWN) return 0; spin_lock_irq(&phba->hbalock); - phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED); if (phba->link_state > LPFC_LINK_DOWN) { phba->link_state = LPFC_LINK_DOWN; phba->pport->fc_flag &= ~FC_LBIT; @@ -752,7 +723,7 @@ lpfc_linkdown(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { /* Issue a LINK DOWN event to all nodes */ lpfc_linkdown_port(vports[i]); } @@ -862,11 +833,10 @@ lpfc_linkup(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) lpfc_linkup_port(vports[i]); lpfc_destroy_vport_work_array(phba, vports); - if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && - (phba->sli_rev < LPFC_SLI_REV4)) + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) lpfc_issue_clear_la(phba, phba->pport); return 0; @@ -884,7 +854,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_sli *psli = &phba->sli; - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; uint32_t control; /* Since we don't do discovery right now, turn these off here */ @@ -947,7 +917,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; - if (pmb->u.mb.mbxStatus) + if (pmb->mb.mbxStatus) goto out; mempool_free(pmb, phba->mbox_mem_pool); @@ -975,7 +945,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, "0306 CONFIG_LINK mbxStatus error x%x " "HBA state x%x\n", - pmb->u.mb.mbxStatus, vport->port_state); + pmb->mb.mbxStatus, vport->port_state); mempool_free(pmb, 
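The restored block above manipulates the host-control register directly: read it, test the ring-0 interrupt-enable bit, set it if clear, write it back, then read once more so the posted write is flushed (the "/* flush */" comment in the hunk). A stand-alone model of that read-modify-write pattern, with a fake register and hypothetical helper names:

#include <stdint.h>
#include <stdio.h>

#define HC_R0INT_ENA  0x1u
#define LPFC_ELS_RING 0

static volatile uint32_t fake_hc_reg;  /* stand-in for the mapped HC register */

static uint32_t readl_(volatile uint32_t *a) { return *a; }
static void writel_(uint32_t v, volatile uint32_t *a) { *a = v; }

/* Enable ring-0 interrupts only if they are off; the trailing read
 * flushes the posted write, as in the hunk above. */
static void enable_els_ring_irq(void)
{
    uint32_t control = readl_(&fake_hc_reg);
    if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
        control |= HC_R0INT_ENA << LPFC_ELS_RING;
        writel_(control, &fake_hc_reg);
        (void)readl_(&fake_hc_reg);    /* flush posted write */
    }
}

int main(void)
{
    enable_els_ring_irq();
    printf("HC = 0x%x\n", fake_hc_reg);
    return 0;
}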
phba->mbox_mem_pool); lpfc_linkdown(phba); @@ -988,593 +958,10 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) return; } -static void -lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) -{ - struct lpfc_vport *vport = mboxq->vport; - unsigned long flags; - - if (mboxq->u.mb.mbxStatus) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, - "2017 REG_FCFI mbxStatus error x%x " - "HBA state x%x\n", - mboxq->u.mb.mbxStatus, vport->port_state); - mempool_free(mboxq, phba->mbox_mem_pool); - return; - } - - /* Start FCoE discovery by sending a FLOGI. */ - phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi); - /* Set the FCFI registered flag */ - spin_lock_irqsave(&phba->hbalock, flags); - phba->fcf.fcf_flag |= FCF_REGISTERED; - spin_unlock_irqrestore(&phba->hbalock, flags); - if (vport->port_state != LPFC_FLOGI) { - spin_lock_irqsave(&phba->hbalock, flags); - phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); - spin_unlock_irqrestore(&phba->hbalock, flags); - lpfc_initial_flogi(vport); - } - - mempool_free(mboxq, phba->mbox_mem_pool); - return; -} - -/** - * lpfc_fab_name_match - Check if the fcf fabric name match. - * @fab_name: pointer to fabric name. - * @new_fcf_record: pointer to fcf record. - * - * This routine compare the fcf record's fabric name with provided - * fabric name. If the fabric name are identical this function - * returns 1 else return 0. - **/ -static uint32_t -lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record) -{ - if ((fab_name[0] == - bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) && - (fab_name[1] == - bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) && - (fab_name[2] == - bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) && - (fab_name[3] == - bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) && - (fab_name[4] == - bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) && - (fab_name[5] == - bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) && - (fab_name[6] == - bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) && - (fab_name[7] == - bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))) - return 1; - else - return 0; -} - -/** - * lpfc_mac_addr_match - Check if the fcf mac address match. - * @phba: pointer to lpfc hba data structure. - * @new_fcf_record: pointer to fcf record. - * - * This routine compare the fcf record's mac address with HBA's - * FCF mac address. If the mac addresses are identical this function - * returns 1 else return 0. - **/ -static uint32_t -lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) -{ - if ((phba->fcf.mac_addr[0] == - bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) && - (phba->fcf.mac_addr[1] == - bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) && - (phba->fcf.mac_addr[2] == - bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) && - (phba->fcf.mac_addr[3] == - bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) && - (phba->fcf.mac_addr[4] == - bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) && - (phba->fcf.mac_addr[5] == - bf_get(lpfc_fcf_record_mac_5, new_fcf_record))) - return 1; - else - return 0; -} - -/** - * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba. - * @phba: pointer to lpfc hba data structure. - * @new_fcf_record: pointer to fcf record. - * - * This routine copies the FCF information from the FCF - * record to lpfc_hba data structure. 
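The deleted lpfc_fab_name_match() above compares the eight fabric-name bytes one bf_get() accessor at a time, because the record packs each byte into bit fields. Once the bytes are available contiguously, the same test collapses to a memcmp; a short sketch under that assumption (the byte-array record here is a stand-in, not the driver's fcf_record layout):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Stand-in: real code extracts each byte with bf_get(). */
struct fcf_record_sketch { uint8_t fab_name[8]; };

static int fab_name_match(const uint8_t *fab_name,
                          const struct fcf_record_sketch *rec)
{
    /* Equivalent to the eight chained bf_get() comparisons above. */
    return memcmp(fab_name, rec->fab_name, 8) == 0;
}

int main(void)
{
    struct fcf_record_sketch rec = { { 1, 2, 3, 4, 5, 6, 7, 8 } };
    uint8_t name[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    printf("match: %d\n", fab_name_match(name, &rec));
    return 0;
}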
- **/ -static void -lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record) -{ - phba->fcf.fabric_name[0] = - bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record); - phba->fcf.fabric_name[1] = - bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record); - phba->fcf.fabric_name[2] = - bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record); - phba->fcf.fabric_name[3] = - bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record); - phba->fcf.fabric_name[4] = - bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record); - phba->fcf.fabric_name[5] = - bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record); - phba->fcf.fabric_name[6] = - bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record); - phba->fcf.fabric_name[7] = - bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record); - phba->fcf.mac_addr[0] = - bf_get(lpfc_fcf_record_mac_0, new_fcf_record); - phba->fcf.mac_addr[1] = - bf_get(lpfc_fcf_record_mac_1, new_fcf_record); - phba->fcf.mac_addr[2] = - bf_get(lpfc_fcf_record_mac_2, new_fcf_record); - phba->fcf.mac_addr[3] = - bf_get(lpfc_fcf_record_mac_3, new_fcf_record); - phba->fcf.mac_addr[4] = - bf_get(lpfc_fcf_record_mac_4, new_fcf_record); - phba->fcf.mac_addr[5] = - bf_get(lpfc_fcf_record_mac_5, new_fcf_record); - phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); - phba->fcf.priority = new_fcf_record->fip_priority; -} - -/** - * lpfc_register_fcf - Register the FCF with hba. - * @phba: pointer to lpfc hba data structure. - * - * This routine issues a register fcfi mailbox command to register - * the fcf with HBA. - **/ -static void -lpfc_register_fcf(struct lpfc_hba *phba) -{ - LPFC_MBOXQ_t *fcf_mbxq; - int rc; - unsigned long flags; - - spin_lock_irqsave(&phba->hbalock, flags); - - /* If the FCF is not availabe do nothing. */ - if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { - spin_unlock_irqrestore(&phba->hbalock, flags); - return; - } - - /* The FCF is already registered, start discovery */ - if (phba->fcf.fcf_flag & FCF_REGISTERED) { - phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE); - spin_unlock_irqrestore(&phba->hbalock, flags); - if (phba->pport->port_state != LPFC_FLOGI) - lpfc_initial_flogi(phba->pport); - return; - } - spin_unlock_irqrestore(&phba->hbalock, flags); - - fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, - GFP_KERNEL); - if (!fcf_mbxq) - return; - - lpfc_reg_fcfi(phba, fcf_mbxq); - fcf_mbxq->vport = phba->pport; - fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi; - rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) - mempool_free(fcf_mbxq, phba->mbox_mem_pool); - - return; -} - -/** - * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery. - * @phba: pointer to lpfc hba data structure. - * @new_fcf_record: pointer to fcf record. - * @boot_flag: Indicates if this record used by boot bios. - * @addr_mode: The address mode to be used by this FCF - * - * This routine compare the fcf record with connect list obtained from the - * config region to decide if this FCF can be used for SAN discovery. It returns - * 1 if this record can be used for SAN discovery else return zero. If this FCF - * record can be used for SAN discovery, the boot_flag will indicate if this FCF - * is used by boot bios and addr_mode will indicate the addressing mode to be - * used for this FCF when the function returns. - * If the FCF record need to be used with a particular vlan id, the vlan is - * set in the vlan_id on return of the function. 
If not VLAN tagging need to - * be used with the FCF vlan_id will be set to 0xFFFF; - **/ -static int -lpfc_match_fcf_conn_list(struct lpfc_hba *phba, - struct fcf_record *new_fcf_record, - uint32_t *boot_flag, uint32_t *addr_mode, - uint16_t *vlan_id) -{ - struct lpfc_fcf_conn_entry *conn_entry; - - if (!phba->cfg_enable_fip) { - *boot_flag = 0; - *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, - new_fcf_record); - if (phba->valid_vlan) - *vlan_id = phba->vlan_id; - else - *vlan_id = 0xFFFF; - return 1; - } - - /* - * If there are no FCF connection table entry, driver connect to all - * FCFs. - */ - if (list_empty(&phba->fcf_conn_rec_list)) { - *boot_flag = 0; - *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, - new_fcf_record); - *vlan_id = 0xFFFF; - return 1; - } - - list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) { - if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID)) - continue; - - if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) && - !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name, - new_fcf_record)) - continue; - - if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) { - /* - * If the vlan bit map does not have the bit set for the - * vlan id to be used, then it is not a match. - */ - if (!(new_fcf_record->vlan_bitmap - [conn_entry->conn_rec.vlan_tag / 8] & - (1 << (conn_entry->conn_rec.vlan_tag % 8)))) - continue; - } - - /* - * Check if the connection record specifies a required - * addressing mode. - */ - if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && - !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) { - - /* - * If SPMA required but FCF not support this continue. - */ - if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && - !(bf_get(lpfc_fcf_record_mac_addr_prov, - new_fcf_record) & LPFC_FCF_SPMA)) - continue; - - /* - * If FPMA required but FCF not support this continue. - */ - if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && - !(bf_get(lpfc_fcf_record_mac_addr_prov, - new_fcf_record) & LPFC_FCF_FPMA)) - continue; - } - - /* - * This fcf record matches filtering criteria. - */ - if (conn_entry->conn_rec.flags & FCFCNCT_BOOT) - *boot_flag = 1; - else - *boot_flag = 0; - - *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov, - new_fcf_record); - /* - * If the user specified a required address mode, assign that - * address mode - */ - if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && - (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED))) - *addr_mode = (conn_entry->conn_rec.flags & - FCFCNCT_AM_SPMA) ? - LPFC_FCF_SPMA : LPFC_FCF_FPMA; - /* - * If the user specified a prefered address mode, use the - * addr mode only if FCF support the addr_mode. - */ - else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && - (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && - (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && - (*addr_mode & LPFC_FCF_SPMA)) - *addr_mode = LPFC_FCF_SPMA; - else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) && - (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) && - !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) && - (*addr_mode & LPFC_FCF_FPMA)) - *addr_mode = LPFC_FCF_FPMA; - /* - * If user did not specify any addressing mode, use FPMA if - * possible else use SPMA. - */ - else if (*addr_mode & LPFC_FCF_FPMA) - *addr_mode = LPFC_FCF_FPMA; - - if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) - *vlan_id = conn_entry->conn_rec.vlan_tag; - else - *vlan_id = 0xFFFF; - - return 1; - } - - return 0; -} - -/** - * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox. 
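The VLAN check in the deleted connection-list matcher above is a plain bitmap membership test: with 4096 possible VLAN IDs in a 512-byte map, the byte index is tag / 8 and the bit index tag % 8. A compilable sketch of exactly that test:

#include <stdint.h>
#include <stdio.h>

/* 4096 VLAN IDs -> 512-byte bitmap, as in the deleted check. */
static int vlan_allowed(const uint8_t bitmap[512], uint16_t tag)
{
    return (bitmap[tag / 8] >> (tag % 8)) & 1;
}

int main(void)
{
    uint8_t bitmap[512] = { 0 };
    bitmap[100 / 8] |= 1 << (100 % 8);   /* permit VLAN 100 */
    printf("vlan 100: %d, vlan 101: %d\n",
           vlan_allowed(bitmap, 100), vlan_allowed(bitmap, 101));
    return 0;
}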
- * @phba: pointer to lpfc hba data structure. - * @mboxq: pointer to mailbox object. - * - * This function iterate through all the fcf records available in - * HBA and choose the optimal FCF record for discovery. After finding - * the FCF for discovery it register the FCF record and kick start - * discovery. - * If FCF_IN_USE flag is set in currently used FCF, the routine try to - * use a FCF record which match fabric name and mac address of the - * currently used FCF record. - * If the driver support only one FCF, it will try to use the FCF record - * used by BOOT_BIOS. - */ -void -lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) -{ - void *virt_addr; - dma_addr_t phys_addr; - uint8_t *bytep; - struct lpfc_mbx_sge sge; - struct lpfc_mbx_read_fcf_tbl *read_fcf; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - struct fcf_record *new_fcf_record; - int rc; - uint32_t boot_flag, addr_mode; - uint32_t next_fcf_index; - unsigned long flags; - uint16_t vlan_id; - - /* Get the first SGE entry from the non-embedded DMA memory. This - * routine only uses a single SGE. - */ - lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); - phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); - if (unlikely(!mboxq->sge_array)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, - "2524 Failed to get the non-embedded SGE " - "virtual address\n"); - goto out; - } - virt_addr = mboxq->sge_array->addr[0]; - - shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, - &shdr->response); - /* - * The FCF Record was read and there is no reason for the driver - * to maintain the FCF record data or memory. Instead, just need - * to book keeping the FCFIs can be used. - */ - if (shdr_status || shdr_add_status) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2521 READ_FCF_RECORD mailbox failed " - "with status x%x add_status x%x, mbx\n", - shdr_status, shdr_add_status); - goto out; - } - /* Interpreting the returned information of FCF records */ - read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; - lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, - sizeof(struct lpfc_mbx_read_fcf_tbl)); - next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); - - new_fcf_record = (struct fcf_record *)(virt_addr + - sizeof(struct lpfc_mbx_read_fcf_tbl)); - lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, - sizeof(struct fcf_record)); - bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); - - rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, - &boot_flag, &addr_mode, - &vlan_id); - /* - * If the fcf record does not match with connect list entries - * read the next entry. - */ - if (!rc) - goto read_next_fcf; - /* - * If this is not the first FCF discovery of the HBA, use last - * FCF record for the discovery. - */ - spin_lock_irqsave(&phba->hbalock, flags); - if (phba->fcf.fcf_flag & FCF_IN_USE) { - if (lpfc_fab_name_match(phba->fcf.fabric_name, - new_fcf_record) && - lpfc_mac_addr_match(phba, new_fcf_record)) { - phba->fcf.fcf_flag |= FCF_AVAILABLE; - spin_unlock_irqrestore(&phba->hbalock, flags); - goto out; - } - spin_unlock_irqrestore(&phba->hbalock, flags); - goto read_next_fcf; - } - if (phba->fcf.fcf_flag & FCF_AVAILABLE) { - /* - * If the current FCF record does not have boot flag - * set and new fcf record has boot flag set, use the - * new fcf record. 
- */ - if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) { - /* Use this FCF record */ - lpfc_copy_fcf_record(phba, new_fcf_record); - phba->fcf.addr_mode = addr_mode; - phba->fcf.fcf_flag |= FCF_BOOT_ENABLE; - if (vlan_id != 0xFFFF) { - phba->fcf.fcf_flag |= FCF_VALID_VLAN; - phba->fcf.vlan_id = vlan_id; - } - spin_unlock_irqrestore(&phba->hbalock, flags); - goto read_next_fcf; - } - /* - * If the current FCF record has boot flag set and the - * new FCF record does not have boot flag, read the next - * FCF record. - */ - if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) { - spin_unlock_irqrestore(&phba->hbalock, flags); - goto read_next_fcf; - } - /* - * If there is a record with lower priority value for - * the current FCF, use that record. - */ - if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record) - && (new_fcf_record->fip_priority < - phba->fcf.priority)) { - /* Use this FCF record */ - lpfc_copy_fcf_record(phba, new_fcf_record); - phba->fcf.addr_mode = addr_mode; - if (vlan_id != 0xFFFF) { - phba->fcf.fcf_flag |= FCF_VALID_VLAN; - phba->fcf.vlan_id = vlan_id; - } - spin_unlock_irqrestore(&phba->hbalock, flags); - goto read_next_fcf; - } - spin_unlock_irqrestore(&phba->hbalock, flags); - goto read_next_fcf; - } - /* - * This is the first available FCF record, use this - * record. - */ - lpfc_copy_fcf_record(phba, new_fcf_record); - phba->fcf.addr_mode = addr_mode; - if (boot_flag) - phba->fcf.fcf_flag |= FCF_BOOT_ENABLE; - phba->fcf.fcf_flag |= FCF_AVAILABLE; - if (vlan_id != 0xFFFF) { - phba->fcf.fcf_flag |= FCF_VALID_VLAN; - phba->fcf.vlan_id = vlan_id; - } - spin_unlock_irqrestore(&phba->hbalock, flags); - goto read_next_fcf; - -read_next_fcf: - lpfc_sli4_mbox_cmd_free(phba, mboxq); - if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) - lpfc_register_fcf(phba); - else - lpfc_sli4_read_fcf_record(phba, next_fcf_index); - return; - -out: - lpfc_sli4_mbox_cmd_free(phba, mboxq); - lpfc_register_fcf(phba); - - return; -} - -/** - * lpfc_start_fdiscs - send fdiscs for each vports on this port. - * @phba: pointer to lpfc hba data structure. - * - * This function loops through the list of vports on the @phba and issues an - * FDISC if possible. 
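The deleted completion handler above encodes a simple preference order when scanning FCF records: a record flagged for boot BIOS beats one that is not, and among records of equal boot status the lower FIP priority value wins (the fabric-name/MAC match against an in-use FCF is a separate, earlier check). A simplified predicate capturing just those two rules:

#include <stdint.h>
#include <stdio.h>

struct fcf_sketch { int boot; uint8_t priority; };  /* stand-in */

/* Return 1 if the candidate should replace the currently held record. */
static int prefer_candidate(const struct fcf_sketch *cur,
                            const struct fcf_sketch *cand)
{
    if (cand->boot && !cur->boot)
        return 1;                       /* boot BIOS record wins */
    if (!cand->boot && cur->boot)
        return 0;
    return cand->priority < cur->priority;  /* lower FIP priority wins */
}

int main(void)
{
    struct fcf_sketch cur = { 0, 128 }, cand = { 0, 2 };
    printf("replace: %d\n", prefer_candidate(&cur, &cand));
    return 0;
}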
- */ -void -lpfc_start_fdiscs(struct lpfc_hba *phba) -{ - struct lpfc_vport **vports; - int i; - - vports = lpfc_create_vport_work_array(phba); - if (vports != NULL) { - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { - if (vports[i]->port_type == LPFC_PHYSICAL_PORT) - continue; - /* There are no vpi for this vport */ - if (vports[i]->vpi > phba->max_vpi) { - lpfc_vport_set_state(vports[i], - FC_VPORT_FAILED); - continue; - } - if (phba->fc_topology == TOPOLOGY_LOOP) { - lpfc_vport_set_state(vports[i], - FC_VPORT_LINKDOWN); - continue; - } - if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) - lpfc_initial_fdisc(vports[i]); - else { - lpfc_vport_set_state(vports[i], - FC_VPORT_NO_FABRIC_SUPP); - lpfc_printf_vlog(vports[i], KERN_ERR, - LOG_ELS, - "0259 No NPIV " - "Fabric support\n"); - } - } - } - lpfc_destroy_vport_work_array(phba, vports); -} - -void -lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) -{ - struct lpfc_dmabuf *dmabuf = mboxq->context1; - struct lpfc_vport *vport = mboxq->vport; - - if (mboxq->u.mb.mbxStatus) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, - "2018 REG_VFI mbxStatus error x%x " - "HBA state x%x\n", - mboxq->u.mb.mbxStatus, vport->port_state); - if (phba->fc_topology == TOPOLOGY_LOOP) { - /* FLOGI failed, use loop map to make discovery list */ - lpfc_disc_list_loopmap(vport); - /* Start discovery */ - lpfc_disc_start(vport); - goto fail_free_mem; - } - lpfc_vport_set_state(vport, FC_VPORT_FAILED); - goto fail_free_mem; - } - /* Mark the vport has registered with its VFI */ - vport->vfi_state |= LPFC_VFI_REGISTERED; - - if (vport->port_state == LPFC_FABRIC_CFG_LINK) { - lpfc_start_fdiscs(phba); - lpfc_do_scr_ns_plogi(phba, vport); - } - -fail_free_mem: - mempool_free(mboxq, phba->mbox_mem_pool); - lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); - kfree(dmabuf); - return; -} - static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; struct lpfc_vport *vport = pmb->vport; @@ -1625,13 +1012,13 @@ static void lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) { struct lpfc_vport *vport = phba->pport; - LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL; + LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox; int i; struct lpfc_dmabuf *mp; int rc; - struct fcf_record *fcf_record; sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); spin_lock_irq(&phba->hbalock); switch (la->UlnkSpeed) { @@ -1647,9 +1034,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) case LA_8GHZ_LINK: phba->fc_linkspeed = LA_8GHZ_LINK; break; - case LA_10GHZ_LINK: - phba->fc_linkspeed = LA_10GHZ_LINK; - break; default: phba->fc_linkspeed = LA_UNKNW_LINK; break; @@ -1731,66 +1115,22 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(sparam_mbox, phba->mbox_mem_pool); + if (cfglink_mbox) + mempool_free(cfglink_mbox, phba->mbox_mem_pool); goto out; } } - if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) { - cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!cfglink_mbox) - goto out; + if (cfglink_mbox) { vport->port_state = LPFC_LOCAL_CFG_LINK; lpfc_config_link(phba, cfglink_mbox); cfglink_mbox->vport = vport; cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); - if (rc == 
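The deleted lpfc_start_fdiscs() above walks the vport array with four outcomes per vport: skip the physical port, fail vports whose vpi exceeds what the HBA granted, park every vport as link-down on loop topology, and otherwise issue FDISC (or report no NPIV fabric support). A compilable outline of that decision walk, with all types simplified stand-ins:

#include <stdio.h>

enum port_type { PHYSICAL, NPIV };
enum topo { FABRIC, LOOP };

struct vport_sketch { enum port_type type; int vpi; };  /* stand-in */

static void start_fdiscs(struct vport_sketch *v, int n, int max_vpi,
                         enum topo topology, int npiv_fabric)
{
    for (int i = 0; i < n; i++) {
        if (v[i].type == PHYSICAL)
            continue;                                    /* pport does FLOGI */
        if (v[i].vpi > max_vpi) { puts("vport FAILED: no vpi"); continue; }
        if (topology == LOOP)   { puts("vport LINKDOWN: loop"); continue; }
        if (npiv_fabric)
            puts("issue FDISC");
        else
            puts("vport NO_FABRIC_SUPP");
    }
}

int main(void)
{
    struct vport_sketch v[2] = { { PHYSICAL, 0 }, { NPIV, 1 } };
    start_fdiscs(v, 2, 8, FABRIC, 1);
    return 0;
}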
MBX_NOT_FINISHED) { - mempool_free(cfglink_mbox, phba->mbox_mem_pool); - goto out; - } - } else { - /* - * Add the driver's default FCF record at FCF index 0 now. This - * is phase 1 implementation that support FCF index 0 and driver - * defaults. - */ - if (phba->cfg_enable_fip == 0) { - fcf_record = kzalloc(sizeof(struct fcf_record), - GFP_KERNEL); - if (unlikely(!fcf_record)) { - lpfc_printf_log(phba, KERN_ERR, - LOG_MBOX | LOG_SLI, - "2554 Could not allocate memmory for " - "fcf record\n"); - rc = -ENODEV; - goto out; - } - - lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, - LPFC_FCOE_FCF_DEF_INDEX); - rc = lpfc_sli4_add_fcf_record(phba, fcf_record); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, - LOG_MBOX | LOG_SLI, - "2013 Could not manually add FCF " - "record 0, status %d\n", rc); - rc = -ENODEV; - kfree(fcf_record); - goto out; - } - kfree(fcf_record); - } - /* - * The driver is expected to do FIP/FCF. Call the port - * and get the FCF Table. - */ - rc = lpfc_sli4_read_fcf_record(phba, - LPFC_FCOE_FCF_GET_FIRST); - if (rc) - goto out; + if (rc != MBX_NOT_FINISHED) + return; + mempool_free(cfglink_mbox, phba->mbox_mem_pool); } - - return; out: lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, @@ -1807,12 +1147,10 @@ lpfc_enable_la(struct lpfc_hba *phba) struct lpfc_sli *psli = &phba->sli; spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_PROCESS_LA; - if (phba->sli_rev <= LPFC_SLI_REV3) { - control = readl(phba->HCregaddr); - control |= HC_LAINT_ENA; - writel(control, phba->HCregaddr); - readl(phba->HCregaddr); /* flush */ - } + control = readl(phba->HCregaddr); + control |= HC_LAINT_ENA; + writel(control, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ spin_unlock_irq(&phba->hbalock); } @@ -1821,7 +1159,6 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba) { lpfc_linkdown(phba); lpfc_enable_la(phba); - lpfc_unregister_unused_fcf(phba); /* turn on Link Attention interrupts - no CLEAR_LA needed */ } @@ -1838,7 +1175,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); READ_LA_VAR *la; - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); /* Unblock ELS traffic */ @@ -1853,7 +1190,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) goto lpfc_mbx_cmpl_read_la_free_mbuf; } - la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; + la = (READ_LA_VAR *) & pmb->mb.un.varReadLA; memcpy(&phba->alpa_map[0], mp->virt, 128); @@ -1991,7 +1328,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) static void lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); @@ -2044,7 +1381,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; switch (mb->mbxStatus) { case 0x0011: @@ -2079,128 +1416,6 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) return; } -/** - * lpfc_create_static_vport - Read HBA config region to create static vports. - * @phba: pointer to lpfc hba data structure. 
- * - * This routine issue a DUMP mailbox command for config region 22 to get - * the list of static vports to be created. The function create vports - * based on the information returned from the HBA. - **/ -void -lpfc_create_static_vport(struct lpfc_hba *phba) -{ - LPFC_MBOXQ_t *pmb = NULL; - MAILBOX_t *mb; - struct static_vport_info *vport_info; - int rc, i; - struct fc_vport_identifiers vport_id; - struct fc_vport *new_fc_vport; - struct Scsi_Host *shost; - struct lpfc_vport *vport; - uint16_t offset = 0; - uint8_t *vport_buff; - - pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!pmb) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0542 lpfc_create_static_vport failed to" - " allocate mailbox memory\n"); - return; - } - - mb = &pmb->u.mb; - - vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL); - if (!vport_info) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0543 lpfc_create_static_vport failed to" - " allocate vport_info\n"); - mempool_free(pmb, phba->mbox_mem_pool); - return; - } - - vport_buff = (uint8_t *) vport_info; - do { - lpfc_dump_static_vport(phba, pmb, offset); - pmb->vport = phba->pport; - rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO); - - if ((rc != MBX_SUCCESS) || mb->mbxStatus) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "0544 lpfc_create_static_vport failed to" - " issue dump mailbox command ret 0x%x " - "status 0x%x\n", - rc, mb->mbxStatus); - goto out; - } - - if (mb->un.varDmp.word_cnt > - sizeof(struct static_vport_info) - offset) - mb->un.varDmp.word_cnt = - sizeof(struct static_vport_info) - offset; - - lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, - vport_buff + offset, - mb->un.varDmp.word_cnt); - offset += mb->un.varDmp.word_cnt; - - } while (mb->un.varDmp.word_cnt && - offset < sizeof(struct static_vport_info)); - - - if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) || - ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK) - != VPORT_INFO_REV)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0545 lpfc_create_static_vport bad" - " information header 0x%x 0x%x\n", - le32_to_cpu(vport_info->signature), - le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK); - - goto out; - } - - shost = lpfc_shost_from_vport(phba->pport); - - for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) { - memset(&vport_id, 0, sizeof(vport_id)); - vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn); - vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn); - if (!vport_id.port_name || !vport_id.node_name) - continue; - - vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; - vport_id.vport_type = FC_PORTTYPE_NPIV; - vport_id.disable = false; - new_fc_vport = fc_vport_create(shost, 0, &vport_id); - - if (!new_fc_vport) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "0546 lpfc_create_static_vport failed to" - " create vport \n"); - continue; - } - - vport = *(struct lpfc_vport **)new_fc_vport->dd_data; - vport->vport_flag |= STATIC_VPORT; - } - -out: - /* - * If this is timed out command, setting NULL to context2 tell SLI - * layer not to use this buffer. - */ - spin_lock_irq(&phba->hbalock); - pmb->context2 = NULL; - spin_unlock_irq(&phba->hbalock); - kfree(vport_info); - if (rc != MBX_TIMEOUT) - mempool_free(pmb, phba->mbox_mem_pool); - - return; -} - /* * This routine handles processing a Fabric REG_LOGIN mailbox * command upon completion. 
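The do/while loop in the deleted lpfc_create_static_vport() above exists because a DUMP mailbox returns at most a small response window per command (DMP_RSP_SIZE, 27 words), so the region is read in chunks at increasing offsets until the firmware returns zero words or the buffer is full. A stand-alone model of that chunked read, with the region and window sizes as illustrative stand-ins:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define REGION_SIZE 128   /* stand-in for sizeof(struct static_vport_info) */

static const uint8_t region[REGION_SIZE] = { 'V', 'P' };  /* fake firmware data */

/* One "mailbox command": copy up to max bytes starting at offset and
 * report how many were returned (analogous to mb->un.varDmp.word_cnt). */
static unsigned dump_region(unsigned offset, uint8_t *dst, unsigned max)
{
    unsigned n = REGION_SIZE - offset;
    if (n > max)
        n = max;
    memcpy(dst, region + offset, n);
    return n;
}

int main(void)
{
    uint8_t buf[REGION_SIZE];
    unsigned offset = 0, got;

    do {
        got = dump_region(offset, buf + offset, 27 * 4 /* DMP_RSP_SIZE */);
        offset += got;
    } while (got && offset < sizeof(buf));
    printf("read %u bytes\n", offset);
    return 0;
}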
It is setup in the LPFC_MBOXQ @@ -2211,17 +1426,16 @@ void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); struct lpfc_nodelist *ndlp; + struct lpfc_vport **vports; + int i; ndlp = (struct lpfc_nodelist *) pmb->context2; pmb->context1 = NULL; pmb->context2 = NULL; if (mb->mbxStatus) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, - "0258 Register Fabric login error: 0x%x\n", - mb->mbxStatus); lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); mempool_free(pmb, phba->mbox_mem_pool); @@ -2240,6 +1454,9 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, + "0258 Register Fabric login error: 0x%x\n", + mb->mbxStatus); /* Decrement the reference count to ndlp after the reference * to the ndlp are done. */ @@ -2248,12 +1465,34 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_VALID; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); if (vport->port_state == LPFC_FABRIC_CFG_LINK) { - lpfc_start_fdiscs(phba); + vports = lpfc_create_vport_work_array(phba); + if (vports != NULL) + for(i = 0; + i <= phba->max_vpi && vports[i] != NULL; + i++) { + if (vports[i]->port_type == LPFC_PHYSICAL_PORT) + continue; + if (phba->fc_topology == TOPOLOGY_LOOP) { + lpfc_vport_set_state(vports[i], + FC_VPORT_LINKDOWN); + continue; + } + if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) + lpfc_initial_fdisc(vports[i]); + else { + lpfc_vport_set_state(vports[i], + FC_VPORT_NO_FABRIC_SUPP); + lpfc_printf_vlog(vport, KERN_ERR, + LOG_ELS, + "0259 No NPIV " + "Fabric support\n"); + } + } + lpfc_destroy_vport_work_array(phba, vports); lpfc_do_scr_ns_plogi(phba, vport); } @@ -2277,16 +1516,13 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; struct lpfc_vport *vport = pmb->vport; if (mb->mbxStatus) { out: - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, - "0260 Register NameServer error: 0x%x\n", - mb->mbxStatus); /* decrement the node reference count held for this * callback function. 
*/ @@ -2310,13 +1546,15 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) return; } lpfc_vport_set_state(vport, FC_VPORT_FAILED); + lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + "0260 Register NameServer error: 0x%x\n", + mb->mbxStatus); return; } pmb->context1 = NULL; ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_VALID; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); @@ -2817,7 +2055,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, if (pring->ringno == LPFC_ELS_RING) { switch (icmd->ulpCommand) { case CMD_GEN_REQUEST64_CR: - if (iocb->context_un.ndlp == ndlp) + if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) return 1; case CMD_ELS_REQUEST64_CR: if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID) @@ -2864,7 +2102,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) */ psli = &phba->sli; rpi = ndlp->nlp_rpi; - if (ndlp->nlp_flag & NLP_RPI_VALID) { + if (rpi) { /* Now process each ring */ for (i = 0; i < psli->num_rings; i++) { pring = &psli->ring[i]; @@ -2912,7 +2150,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) LPFC_MBOXQ_t *mbox; int rc; - if (ndlp->nlp_flag & NLP_RPI_VALID) { + if (ndlp->nlp_rpi) { mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox); @@ -2924,7 +2162,6 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } lpfc_no_rpi(phba, ndlp); ndlp->nlp_rpi = 0; - ndlp->nlp_flag &= ~NLP_RPI_VALID; return 1; } return 0; @@ -3015,7 +2252,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ if ((mb = phba->sli.mbox_active)) { - if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && + if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { mb->context2 = NULL; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -3024,7 +2261,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { - if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && + if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { mp = (struct lpfc_dmabuf *) (mb->context1); if (mp) { @@ -3072,14 +2309,13 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) int rc; lpfc_cancel_retry_delay_tmo(vport, ndlp); - if ((ndlp->nlp_flag & NLP_DEFER_RM) && - !(ndlp->nlp_flag & NLP_RPI_VALID)) { + if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) { /* For this case we need to cleanup the default rpi * allocated by the firmware. */ if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) { - rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID, + rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID, (uint8_t *) &vport->fc_sparam, mbox, 0); if (rc) { mempool_free(mbox, phba->mbox_mem_pool); @@ -3317,8 +2553,7 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport) * clear_la then don't send it. 
*/ if ((phba->link_state >= LPFC_CLEAR_LA) || - (vport->port_type != LPFC_PHYSICAL_PORT) || - (phba->sli_rev == LPFC_SLI_REV4)) + (vport->port_type != LPFC_PHYSICAL_PORT)) return; /* Link up discovery */ @@ -3347,7 +2582,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport) regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (regvpimbox) { - lpfc_reg_vpi(vport, regvpimbox); + lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox); regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi; regvpimbox->vport = vport; if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT) @@ -3407,8 +2642,7 @@ lpfc_disc_start(struct lpfc_vport *vport) */ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && !(vport->fc_flag & FC_PT2PT) && - !(vport->fc_flag & FC_RSCN_MODE) && - (phba->sli_rev < LPFC_SLI_REV4)) { + !(vport->fc_flag & FC_RSCN_MODE)) { lpfc_issue_reg_vpi(phba, vport); return; } @@ -3685,13 +2919,11 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) * set port_state to PORT_READY if SLI2. * cmpl_reg_vpi will set port_state to READY for SLI3. */ - if (phba->sli_rev < LPFC_SLI_REV4) { - if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) - lpfc_issue_reg_vpi(phba, vport); - else { /* NPIV Not enabled */ - lpfc_issue_clear_la(phba, vport); - vport->port_state = LPFC_VPORT_READY; - } + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) + lpfc_issue_reg_vpi(phba, vport); + else { /* NPIV Not enabled */ + lpfc_issue_clear_la(phba, vport); + vport->port_state = LPFC_VPORT_READY; } /* Setup and issue mailbox INITIALIZE LINK command */ @@ -3707,7 +2939,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) lpfc_linkdown(phba); lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, phba->cfg_link_speed); - initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; + initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0; initlinkmbox->vport = vport; initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); @@ -3727,13 +2959,11 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) * set port_state to PORT_READY if SLI2. * cmpl_reg_vpi will set port_state to READY for SLI3. */ - if (phba->sli_rev < LPFC_SLI_REV4) { - if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) - lpfc_issue_reg_vpi(phba, vport); - else { /* NPIV Not enabled */ - lpfc_issue_clear_la(phba, vport); - vport->port_state = LPFC_VPORT_READY; - } + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) + lpfc_issue_reg_vpi(phba, vport); + else { /* NPIV Not enabled */ + lpfc_issue_clear_la(phba, vport); + vport->port_state = LPFC_VPORT_READY; } break; @@ -3806,7 +3036,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; struct lpfc_vport *vport = pmb->vport; @@ -3814,7 +3044,6 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) pmb->context1 = NULL; ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_VALID; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); @@ -4068,395 +3297,3 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) return 1; return 0; } - -/** - * lpfc_fcf_inuse - Check if FCF can be unregistered. - * @phba: Pointer to hba context object. 
- * - * This function iterate through all FC nodes associated - * will all vports to check if there is any node with - * fc_rports associated with it. If there is an fc_rport - * associated with the node, then the node is either in - * discovered state or its devloss_timer is pending. - */ -static int -lpfc_fcf_inuse(struct lpfc_hba *phba) -{ - struct lpfc_vport **vports; - int i, ret = 0; - struct lpfc_nodelist *ndlp; - struct Scsi_Host *shost; - - vports = lpfc_create_vport_work_array(phba); - - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { - shost = lpfc_shost_from_vport(vports[i]); - spin_lock_irq(shost->host_lock); - list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { - if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport && - (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { - ret = 1; - spin_unlock_irq(shost->host_lock); - goto out; - } - } - spin_unlock_irq(shost->host_lock); - } -out: - lpfc_destroy_vport_work_array(phba, vports); - return ret; -} - -/** - * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi. - * @phba: Pointer to hba context object. - * @mboxq: Pointer to mailbox object. - * - * This function frees memory associated with the mailbox command. - */ -static void -lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) -{ - struct lpfc_vport *vport = mboxq->vport; - - if (mboxq->u.mb.mbxStatus) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, - "2555 UNREG_VFI mbxStatus error x%x " - "HBA state x%x\n", - mboxq->u.mb.mbxStatus, vport->port_state); - } - mempool_free(mboxq, phba->mbox_mem_pool); - return; -} - -/** - * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi. - * @phba: Pointer to hba context object. - * @mboxq: Pointer to mailbox object. - * - * This function frees memory associated with the mailbox command. - */ -static void -lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) -{ - struct lpfc_vport *vport = mboxq->vport; - - if (mboxq->u.mb.mbxStatus) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, - "2550 UNREG_FCFI mbxStatus error x%x " - "HBA state x%x\n", - mboxq->u.mb.mbxStatus, vport->port_state); - } - mempool_free(mboxq, phba->mbox_mem_pool); - return; -} - -/** - * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected. - * @phba: Pointer to hba context object. - * - * This function check if there are any connected remote port for the FCF and - * if all the devices are disconnected, this function unregister FCFI. - * This function also tries to use another FCF for discovery. - */ -void -lpfc_unregister_unused_fcf(struct lpfc_hba *phba) -{ - LPFC_MBOXQ_t *mbox; - int rc; - struct lpfc_vport **vports; - int i; - - spin_lock_irq(&phba->hbalock); - /* - * If HBA is not running in FIP mode or - * If HBA does not support FCoE or - * If FCF is not registered. - * do nothing. 
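The deleted lpfc_fcf_inuse() above decides whether the FCF can be torn down by scanning every node on every vport: the FCF counts as in use as soon as one node still has an fc_rport with the FCP target role (i.e. it is discovered or its devloss timer is pending). A lock-free simplified sketch of that scan, with the node layout as a stand-in:

#include <stdio.h>

#define ROLE_FCP_TARGET 0x1   /* stand-in for FC_RPORT_ROLE_FCP_TARGET */

struct node_sketch { int has_rport; unsigned roles; };

static int fcf_inuse(const struct node_sketch *nodes, int n)
{
    for (int i = 0; i < n; i++)
        if (nodes[i].has_rport && (nodes[i].roles & ROLE_FCP_TARGET))
            return 1;   /* at least one live target keeps the FCF */
    return 0;
}

int main(void)
{
    struct node_sketch nodes[2] = { { 1, 0 }, { 1, ROLE_FCP_TARGET } };
    printf("fcf in use: %d\n", fcf_inuse(nodes, 2));
    return 0;
}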
- */ - if (!(phba->hba_flag & HBA_FCOE_SUPPORT) || - !(phba->fcf.fcf_flag & FCF_REGISTERED) || - (phba->cfg_enable_fip == 0)) { - spin_unlock_irq(&phba->hbalock); - return; - } - spin_unlock_irq(&phba->hbalock); - - if (lpfc_fcf_inuse(phba)) - return; - - - /* Unregister VPIs */ - vports = lpfc_create_vport_work_array(phba); - if (vports && - (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { - lpfc_mbx_unreg_vpi(vports[i]); - vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; - vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; - } - lpfc_destroy_vport_work_array(phba, vports); - - /* Unregister VFI */ - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, - "2556 UNREG_VFI mbox allocation failed" - "HBA state x%x\n", - phba->pport->port_state); - return; - } - - lpfc_unreg_vfi(mbox, phba->pport->vfi); - mbox->vport = phba->pport; - mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl; - - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, - "2557 UNREG_VFI issue mbox failed rc x%x " - "HBA state x%x\n", - rc, phba->pport->port_state); - mempool_free(mbox, phba->mbox_mem_pool); - return; - } - - /* Unregister FCF */ - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, - "2551 UNREG_FCFI mbox allocation failed" - "HBA state x%x\n", - phba->pport->port_state); - return; - } - - lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); - mbox->vport = phba->pport; - mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); - - if (rc == MBX_NOT_FINISHED) { - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, - "2552 UNREG_FCFI issue mbox failed rc x%x " - "HBA state x%x\n", - rc, phba->pport->port_state); - mempool_free(mbox, phba->mbox_mem_pool); - return; - } - - spin_lock_irq(&phba->hbalock); - phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED | - FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE | - FCF_VALID_VLAN); - spin_unlock_irq(&phba->hbalock); - - /* - * If driver is not unloading, check if there is any other - * FCF record that can be used for discovery. - */ - if ((phba->pport->load_flag & FC_UNLOADING) || - (phba->link_state < LPFC_LINK_UP)) - return; - - rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST); - - if (rc) - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, - "2553 lpfc_unregister_unused_fcf failed to read FCF" - " record HBA state x%x\n", - phba->pport->port_state); -} - -/** - * lpfc_read_fcf_conn_tbl - Create driver FCF connection table. - * @phba: Pointer to hba context object. - * @buff: Buffer containing the FCF connection table as in the config - * region. - * This function create driver data structure for the FCF connection - * record table read from config region 23. 
- */ -static void -lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, - uint8_t *buff) -{ - struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; - struct lpfc_fcf_conn_hdr *conn_hdr; - struct lpfc_fcf_conn_rec *conn_rec; - uint32_t record_count; - int i; - - /* Free the current connect table */ - list_for_each_entry_safe(conn_entry, next_conn_entry, - &phba->fcf_conn_rec_list, list) - kfree(conn_entry); - - conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; - record_count = conn_hdr->length * sizeof(uint32_t)/ - sizeof(struct lpfc_fcf_conn_rec); - - conn_rec = (struct lpfc_fcf_conn_rec *) - (buff + sizeof(struct lpfc_fcf_conn_hdr)); - - for (i = 0; i < record_count; i++) { - if (!(conn_rec[i].flags & FCFCNCT_VALID)) - continue; - conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry), - GFP_KERNEL); - if (!conn_entry) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2566 Failed to allocate connection" - " table entry\n"); - return; - } - - memcpy(&conn_entry->conn_rec, &conn_rec[i], - sizeof(struct lpfc_fcf_conn_rec)); - conn_entry->conn_rec.vlan_tag = - le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF; - conn_entry->conn_rec.flags = - le16_to_cpu(conn_entry->conn_rec.flags); - list_add_tail(&conn_entry->list, - &phba->fcf_conn_rec_list); - } -} - -/** - * lpfc_read_fcoe_param - Read FCoe parameters from conf region.. - * @phba: Pointer to hba context object. - * @buff: Buffer containing the FCoE parameter data structure. - * - * This function update driver data structure with config - * parameters read from config region 23. - */ -static void -lpfc_read_fcoe_param(struct lpfc_hba *phba, - uint8_t *buff) -{ - struct lpfc_fip_param_hdr *fcoe_param_hdr; - struct lpfc_fcoe_params *fcoe_param; - - fcoe_param_hdr = (struct lpfc_fip_param_hdr *) - buff; - fcoe_param = (struct lpfc_fcoe_params *) - buff + sizeof(struct lpfc_fip_param_hdr); - - if ((fcoe_param_hdr->parm_version != FIPP_VERSION) || - (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) - return; - - if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) == - FIPP_MODE_ON) - phba->cfg_enable_fip = 1; - - if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) == - FIPP_MODE_OFF) - phba->cfg_enable_fip = 0; - - if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) { - phba->valid_vlan = 1; - phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & - 0xFFF; - } - - phba->fc_map[0] = fcoe_param->fc_map[0]; - phba->fc_map[1] = fcoe_param->fc_map[1]; - phba->fc_map[2] = fcoe_param->fc_map[2]; - return; -} - -/** - * lpfc_get_rec_conf23 - Get a record type in config region data. - * @buff: Buffer containing config region 23 data. - * @size: Size of the data buffer. - * @rec_type: Record type to be searched. - * - * This function searches config region data to find the begining - * of the record specified by record_type. If record found, this - * function return pointer to the record else return NULL. - */ -static uint8_t * -lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type) -{ - uint32_t offset = 0, rec_length; - - if ((buff[0] == LPFC_REGION23_LAST_REC) || - (size < sizeof(uint32_t))) - return NULL; - - rec_length = buff[offset + 1]; - - /* - * One TLV record has one word header and number of data words - * specified in the rec_length field of the record header. 
- */ - while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t)) - <= size) { - if (buff[offset] == rec_type) - return &buff[offset]; - - if (buff[offset] == LPFC_REGION23_LAST_REC) - return NULL; - - offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t); - rec_length = buff[offset + 1]; - } - return NULL; -} - -/** - * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23. - * @phba: Pointer to lpfc_hba data structure. - * @buff: Buffer containing config region 23 data. - * @size: Size of the data buffer. - * - * This fuction parse the FCoE config parameters in config region 23 and - * populate driver data structure with the parameters. - */ -void -lpfc_parse_fcoe_conf(struct lpfc_hba *phba, - uint8_t *buff, - uint32_t size) -{ - uint32_t offset = 0, rec_length; - uint8_t *rec_ptr; - - /* - * If data size is less than 2 words signature and version cannot be - * verified. - */ - if (size < 2*sizeof(uint32_t)) - return; - - /* Check the region signature first */ - if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2567 Config region 23 has bad signature\n"); - return; - } - - offset += 4; - - /* Check the data structure version */ - if (buff[offset] != LPFC_REGION23_VERSION) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2568 Config region 23 has bad version\n"); - return; - } - offset += 4; - - rec_length = buff[offset + 1]; - - /* Read FCoE param record */ - rec_ptr = lpfc_get_rec_conf23(&buff[offset], - size - offset, FCOE_PARAM_TYPE); - if (rec_ptr) - lpfc_read_fcoe_param(phba, rec_ptr); - - /* Read FCF connection table */ - rec_ptr = lpfc_get_rec_conf23(&buff[offset], - size - offset, FCOE_CONN_TBL_TYPE); - if (rec_ptr) - lpfc_read_fcf_conn_tbl(phba, rec_ptr); - -} diff --git a/trunk/drivers/scsi/lpfc/lpfc_hw.h b/trunk/drivers/scsi/lpfc/lpfc_hw.h index 02aa016b93e9..4168c7b498b8 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_hw.h +++ b/trunk/drivers/scsi/lpfc/lpfc_hw.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
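The deleted lpfc_get_rec_conf23() above walks region 23 as a TLV stream: each record is a one-word header whose first byte is the type and second byte the payload length in 32-bit words, followed by that many data words, terminated by a last-record marker. A compilable version of the same walk with bounds checking, under that layout assumption:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define LAST_REC 0xFF   /* stand-in for LPFC_REGION23_LAST_REC */

static const uint8_t *find_rec(const uint8_t *buff, size_t size, uint8_t type)
{
    size_t off = 0;

    while (off + sizeof(uint32_t) <= size) {
        uint8_t rec_type = buff[off];
        size_t rec_len = buff[off + 1] * sizeof(uint32_t);  /* words -> bytes */

        if (rec_type == LAST_REC || off + sizeof(uint32_t) + rec_len > size)
            return NULL;
        if (rec_type == type)
            return buff + off;          /* found: header address */
        off += sizeof(uint32_t) + rec_len;
    }
    return NULL;
}

int main(void)
{
    /* record type 0x17, one data word, then terminator */
    uint8_t region[12] = { 0x17, 1, 0, 0, 0xAA, 0xBB, 0xCC, 0xDD, LAST_REC };
    const uint8_t *rec = find_rec(region, sizeof(region), 0x17);
    printf("found at offset %ld\n", rec ? (long)(rec - region) : -1L);
    return 0;
}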
* * www.emulex.com * * * @@ -470,35 +470,6 @@ struct serv_parm { /* Structure is in Big Endian format */ uint8_t vendorVersion[16]; }; -/* - * Virtual Fabric Tagging Header - */ -struct fc_vft_header { - uint32_t word0; -#define fc_vft_hdr_r_ctl_SHIFT 24 -#define fc_vft_hdr_r_ctl_MASK 0xFF -#define fc_vft_hdr_r_ctl_WORD word0 -#define fc_vft_hdr_ver_SHIFT 22 -#define fc_vft_hdr_ver_MASK 0x3 -#define fc_vft_hdr_ver_WORD word0 -#define fc_vft_hdr_type_SHIFT 18 -#define fc_vft_hdr_type_MASK 0xF -#define fc_vft_hdr_type_WORD word0 -#define fc_vft_hdr_e_SHIFT 16 -#define fc_vft_hdr_e_MASK 0x1 -#define fc_vft_hdr_e_WORD word0 -#define fc_vft_hdr_priority_SHIFT 13 -#define fc_vft_hdr_priority_MASK 0x7 -#define fc_vft_hdr_priority_WORD word0 -#define fc_vft_hdr_vf_id_SHIFT 1 -#define fc_vft_hdr_vf_id_MASK 0xFFF -#define fc_vft_hdr_vf_id_WORD word0 - uint32_t word1; -#define fc_vft_hdr_hopct_SHIFT 24 -#define fc_vft_hdr_hopct_MASK 0xFF -#define fc_vft_hdr_hopct_WORD word1 -}; - /* * Extended Link Service LS_COMMAND codes (Payload Word 0) */ @@ -1181,9 +1152,6 @@ typedef struct { #define PCI_DEVICE_ID_HORNET 0xfe05 #define PCI_DEVICE_ID_ZEPHYR_SCSP 0xfe11 #define PCI_DEVICE_ID_ZEPHYR_DCSP 0xfe12 -#define PCI_VENDOR_ID_SERVERENGINE 0x19a2 -#define PCI_DEVICE_ID_TIGERSHARK 0x0704 -#define PCI_DEVICE_ID_TIGERSHARK_S 0x0705 #define JEDEC_ID_ADDRESS 0x0080001c #define FIREFLY_JEDEC_ID 0x1ACC @@ -1374,21 +1342,15 @@ typedef struct { /* FireFly BIU registers */ #define MBX_READ_LA64 0x95 #define MBX_REG_VPI 0x96 #define MBX_UNREG_VPI 0x97 +#define MBX_REG_VNPID 0x96 +#define MBX_UNREG_VNPID 0x97 #define MBX_WRITE_WWN 0x98 #define MBX_SET_DEBUG 0x99 #define MBX_LOAD_EXP_ROM 0x9C -#define MBX_SLI4_CONFIG 0x9B -#define MBX_SLI4_REQ_FTRS 0x9D -#define MBX_MAX_CMDS 0x9E -#define MBX_RESUME_RPI 0x9E + +#define MBX_MAX_CMDS 0x9D #define MBX_SLI2_CMD_MASK 0x80 -#define MBX_REG_VFI 0x9F -#define MBX_REG_FCFI 0xA0 -#define MBX_UNREG_VFI 0xA1 -#define MBX_UNREG_FCFI 0xA2 -#define MBX_INIT_VFI 0xA3 -#define MBX_INIT_VPI 0xA4 /* IOCB Commands */ @@ -1478,16 +1440,6 @@ typedef struct { /* FireFly BIU registers */ #define CMD_IOCB_LOGENTRY_CN 0x94 #define CMD_IOCB_LOGENTRY_ASYNC_CN 0x96 -/* Unhandled Data Security SLI Commands */ -#define DSSCMD_IWRITE64_CR 0xD8 -#define DSSCMD_IWRITE64_CX 0xD9 -#define DSSCMD_IREAD64_CR 0xDA -#define DSSCMD_IREAD64_CX 0xDB -#define DSSCMD_INVALIDATE_DEK 0xDC -#define DSSCMD_SET_KEK 0xDD -#define DSSCMD_GET_KEK_ID 0xDE -#define DSSCMD_GEN_XFER 0xDF - #define CMD_MAX_IOCB_CMD 0xE6 #define CMD_IOCB_MASK 0xff @@ -1514,7 +1466,6 @@ typedef struct { /* FireFly BIU registers */ #define MBXERR_BAD_RCV_LENGTH 14 #define MBXERR_DMA_ERROR 15 #define MBXERR_ERROR 16 -#define MBXERR_LINK_DOWN 0x33 #define MBX_NOT_FINISHED 255 #define MBX_BUSY 0xffffff /* Attempted cmd to busy Mailbox */ @@ -1553,6 +1504,32 @@ struct ulp_bde { #endif }; +struct ulp_bde64 { /* SLI-2 */ + union ULP_BDE_TUS { + uint32_t w; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED + VALUE !! */ + uint32_t bdeSize:24; /* Size of buffer (in bytes) */ +#else /* __LITTLE_ENDIAN_BITFIELD */ + uint32_t bdeSize:24; /* Size of buffer (in bytes) */ + uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED + VALUE !! 
*/ +#endif +#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */ +#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */ +#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */ +#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */ +#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */ +#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */ +#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */ + } f; + } tus; + uint32_t addrLow; + uint32_t addrHigh; +}; + typedef struct ULP_BDL { /* SLI-2 */ #ifdef __BIG_ENDIAN_BITFIELD uint32_t bdeFlags:8; /* BDL Flags */ @@ -2310,7 +2287,7 @@ typedef struct { uint32_t rsvd3; uint32_t rsvd4; uint32_t rsvd5; - uint16_t vfi; + uint16_t rsvd6; uint16_t vpi; #else /* __LITTLE_ENDIAN */ uint32_t rsvd1; @@ -2320,7 +2297,7 @@ typedef struct { uint32_t rsvd4; uint32_t rsvd5; uint16_t vpi; - uint16_t vfi; + uint16_t rsvd6; #endif } REG_VPI_VAR; @@ -2480,7 +2457,7 @@ typedef struct { uint32_t entry_index:16; #endif - uint32_t sli4_length; + uint32_t rsvd1; uint32_t word_cnt; uint32_t resp_offset; } DUMP_VAR; @@ -2493,32 +2470,9 @@ typedef struct { #define DMP_RSP_OFFSET 0x14 /* word 5 contains first word of rsp */ #define DMP_RSP_SIZE 0x6C /* maximum of 27 words of rsp data */ -#define DMP_REGION_VPORT 0x16 /* VPort info region */ -#define DMP_VPORT_REGION_SIZE 0x200 -#define DMP_MBOX_OFFSET_WORD 0x5 - -#define DMP_REGION_FCOEPARAM 0x17 /* fcoe param region */ -#define DMP_FCOEPARAM_RGN_SIZE 0x400 - #define WAKE_UP_PARMS_REGION_ID 4 #define WAKE_UP_PARMS_WORD_SIZE 15 -struct vport_rec { - uint8_t wwpn[8]; - uint8_t wwnn[8]; -}; - -#define VPORT_INFO_SIG 0x32324752 -#define VPORT_INFO_REV_MASK 0xff -#define VPORT_INFO_REV 0x1 -#define MAX_STATIC_VPORT_COUNT 16 -struct static_vport_info { - uint32_t signature; - uint32_t rev; - struct vport_rec vport_list[MAX_STATIC_VPORT_COUNT]; - uint32_t resvd[66]; -}; - /* Option rom version structure */ struct prog_id { #ifdef __BIG_ENDIAN_BITFIELD @@ -2743,9 +2697,7 @@ typedef struct { #endif #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd1 : 19; /* Reserved */ - uint32_t cdss : 1; /* Configure Data Security SLI */ - uint32_t rsvd2 : 3; /* Reserved */ + uint32_t rsvd1 : 23; /* Reserved */ uint32_t cbg : 1; /* Configure BlockGuard */ uint32_t cmv : 1; /* Configure Max VPIs */ uint32_t ccrp : 1; /* Config Command Ring Polling */ @@ -2765,14 +2717,10 @@ typedef struct { uint32_t ccrp : 1; /* Config Command Ring Polling */ uint32_t cmv : 1; /* Configure Max VPIs */ uint32_t cbg : 1; /* Configure BlockGuard */ - uint32_t rsvd2 : 3; /* Reserved */ - uint32_t cdss : 1; /* Configure Data Security SLI */ - uint32_t rsvd1 : 19; /* Reserved */ + uint32_t rsvd1 : 23; /* Reserved */ #endif #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd3 : 19; /* Reserved */ - uint32_t gdss : 1; /* Configure Data Security SLI */ - uint32_t rsvd4 : 3; /* Reserved */ + uint32_t rsvd2 : 23; /* Reserved */ uint32_t gbg : 1; /* Grant BlockGuard */ uint32_t gmv : 1; /* Grant Max VPIs */ uint32_t gcrp : 1; /* Grant Command Ring Polling */ @@ -2792,9 +2740,7 @@ typedef struct { uint32_t gcrp : 1; /* Grant Command Ring Polling */ uint32_t gmv : 1; /* Grant Max VPIs */ uint32_t gbg : 1; /* Grant BlockGuard */ - uint32_t rsvd4 : 3; /* Reserved */ - uint32_t gdss : 1; /* Configure Data Security SLI */ - uint32_t rsvd3 : 19; /* Reserved */ + uint32_t rsvd2 : 23; /* Reserved */ #endif #ifdef __BIG_ENDIAN_BITFIELD @@ -2807,20 +2753,20 @@ typedef struct { #ifdef __BIG_ENDIAN_BITFIELD uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ - 
uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */ + uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ #else /* __LITTLE_ENDIAN */ - uint32_t rsvd5 : 16; /* Max HBQs Host expect to configure */ + uint32_t rsvd3 : 16; /* Max HBQs Host expect to configure */ uint32_t max_hbq : 16; /* Max HBQs Host expect to configure */ #endif - uint32_t rsvd6; /* Reserved */ + uint32_t rsvd4; /* Reserved */ #ifdef __BIG_ENDIAN_BITFIELD - uint32_t rsvd7 : 16; /* Reserved */ + uint32_t rsvd5 : 16; /* Reserved */ uint32_t max_vpi : 16; /* Max number of virt N-Ports */ #else /* __LITTLE_ENDIAN */ uint32_t max_vpi : 16; /* Max number of virt N-Ports */ - uint32_t rsvd7 : 16; /* Reserved */ + uint32_t rsvd5 : 16; /* Reserved */ #endif } CONFIG_PORT_VAR; @@ -3720,5 +3666,3 @@ lpfc_error_lost_link(IOCB_t *iocbp) #define MENLO_TIMEOUT 30 #define SETVAR_MLOMNT 0x103107 #define SETVAR_MLORST 0x103007 - -#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */ diff --git a/trunk/drivers/scsi/lpfc/lpfc_hw4.h b/trunk/drivers/scsi/lpfc/lpfc_hw4.h deleted file mode 100644 index 39c34b3ad29d..000000000000 --- a/trunk/drivers/scsi/lpfc/lpfc_hw4.h +++ /dev/null @@ -1,2141 +0,0 @@ -/******************************************************************* - * This file is part of the Emulex Linux Device Driver for * - * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2009 Emulex. All rights reserved. * - * EMULEX and SLI are trademarks of Emulex. * - * www.emulex.com * - * * - * This program is free software; you can redistribute it and/or * - * modify it under the terms of version 2 of the GNU General * - * Public License as published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful. * - * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * - * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * - * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * - * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * - * TO BE LEGALLY INVALID. See the GNU General Public License for * - * more details, a copy of which can be found in the file COPYING * - * included with this package. * - *******************************************************************/ - -/* Macros to deal with bit fields. Each bit field must have 3 #defines - * associated with it (_SHIFT, _MASK, and _WORD). - * EG. For a bit field that is in the 7th bit of the "field4" field of a - * structure and is 2 bits in size the following #defines must exist: - * struct temp { - * uint32_t field1; - * uint32_t field2; - * uint32_t field3; - * uint32_t field4; - * #define example_bit_field_SHIFT 7 - * #define example_bit_field_MASK 0x03 - * #define example_bit_field_WORD field4 - * uint32_t field5; - * }; - * Then the macros below may be used to get or set the value of that field. - * EG. 
To get the value of the bit field from the above example: - * struct temp t1; - * value = bf_get(example_bit_field, &t1); - * And then to set that bit field: - * bf_set(example_bit_field, &t1, 2); - * Or clear that bit field: - * bf_set(example_bit_field, &t1, 0); - */ -#define bf_get(name, ptr) \ - (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK) -#define bf_set(name, ptr, value) \ - ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \ - ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))) - -struct dma_address { - uint32_t addr_lo; - uint32_t addr_hi; -}; - -#define LPFC_SLI4_BAR0 1 -#define LPFC_SLI4_BAR1 2 -#define LPFC_SLI4_BAR2 4 - -#define LPFC_SLI4_MBX_EMBED true -#define LPFC_SLI4_MBX_NEMBED false - -#define LPFC_SLI4_MB_WORD_COUNT 64 -#define LPFC_MAX_MQ_PAGE 8 -#define LPFC_MAX_WQ_PAGE 8 -#define LPFC_MAX_CQ_PAGE 4 -#define LPFC_MAX_EQ_PAGE 8 - -#define LPFC_VIR_FUNC_MAX 32 /* Maximum number of virtual functions */ -#define LPFC_PCI_FUNC_MAX 5 /* Maximum number of PCI functions */ -#define LPFC_VFR_PAGE_SIZE 0x1000 /* 4KB BAR2 per-VF register page size */ - -/* Define SLI4 Alignment requirements. */ -#define LPFC_ALIGN_16_BYTE 16 -#define LPFC_ALIGN_64_BYTE 64 - -/* Define SLI4 specific definitions. */ -#define LPFC_MQ_CQE_BYTE_OFFSET 256 -#define LPFC_MBX_CMD_HDR_LENGTH 16 -#define LPFC_MBX_ERROR_RANGE 0x4000 -#define LPFC_BMBX_BIT1_ADDR_HI 0x2 -#define LPFC_BMBX_BIT1_ADDR_LO 0 -#define LPFC_RPI_HDR_COUNT 64 -#define LPFC_HDR_TEMPLATE_SIZE 4096 -#define LPFC_RPI_ALLOC_ERROR 0xFFFF -#define LPFC_FCF_RECORD_WD_CNT 132 -#define LPFC_ENTIRE_FCF_DATABASE 0 -#define LPFC_DFLT_FCF_INDEX 0 - -/* Virtual function numbers */ -#define LPFC_VF0 0 -#define LPFC_VF1 1 -#define LPFC_VF2 2 -#define LPFC_VF3 3 -#define LPFC_VF4 4 -#define LPFC_VF5 5 -#define LPFC_VF6 6 -#define LPFC_VF7 7 -#define LPFC_VF8 8 -#define LPFC_VF9 9 -#define LPFC_VF10 10 -#define LPFC_VF11 11 -#define LPFC_VF12 12 -#define LPFC_VF13 13 -#define LPFC_VF14 14 -#define LPFC_VF15 15 -#define LPFC_VF16 16 -#define LPFC_VF17 17 -#define LPFC_VF18 18 -#define LPFC_VF19 19 -#define LPFC_VF20 20 -#define LPFC_VF21 21 -#define LPFC_VF22 22 -#define LPFC_VF23 23 -#define LPFC_VF24 24 -#define LPFC_VF25 25 -#define LPFC_VF26 26 -#define LPFC_VF27 27 -#define LPFC_VF28 28 -#define LPFC_VF29 29 -#define LPFC_VF30 30 -#define LPFC_VF31 31 - -/* PCI function numbers */ -#define LPFC_PCI_FUNC0 0 -#define LPFC_PCI_FUNC1 1 -#define LPFC_PCI_FUNC2 2 -#define LPFC_PCI_FUNC3 3 -#define LPFC_PCI_FUNC4 4 - -/* Active interrupt test count */ -#define LPFC_ACT_INTR_CNT 4 - -/* Delay Multiplier constant */ -#define LPFC_DMULT_CONST 651042 -#define LPFC_MIM_IMAX 636 -#define LPFC_FP_DEF_IMAX 10000 -#define LPFC_SP_DEF_IMAX 10000 - -struct ulp_bde64 { - union ULP_BDE_TUS { - uint32_t w; - struct { -#ifdef __BIG_ENDIAN_BITFIELD - uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED - VALUE !! */ - uint32_t bdeSize:24; /* Size of buffer (in bytes) */ -#else /* __LITTLE_ENDIAN_BITFIELD */ - uint32_t bdeSize:24; /* Size of buffer (in bytes) */ - uint32_t bdeFlags:8; /* BDE Flags 0 IS A SUPPORTED - VALUE !! 
*/ -#endif -#define BUFF_TYPE_BDE_64 0x00 /* BDE (Host_resident) */ -#define BUFF_TYPE_BDE_IMMED 0x01 /* Immediate Data BDE */ -#define BUFF_TYPE_BDE_64P 0x02 /* BDE (Port-resident) */ -#define BUFF_TYPE_BDE_64I 0x08 /* Input BDE (Host-resident) */ -#define BUFF_TYPE_BDE_64IP 0x0A /* Input BDE (Port-resident) */ -#define BUFF_TYPE_BLP_64 0x40 /* BLP (Host-resident) */ -#define BUFF_TYPE_BLP_64P 0x42 /* BLP (Port-resident) */ - } f; - } tus; - uint32_t addrLow; - uint32_t addrHigh; -}; - -struct lpfc_sli4_flags { - uint32_t word0; -#define lpfc_fip_flag_SHIFT 0 -#define lpfc_fip_flag_MASK 0x00000001 -#define lpfc_fip_flag_WORD word0 -}; - -/* event queue entry structure */ -struct lpfc_eqe { - uint32_t word0; -#define lpfc_eqe_resource_id_SHIFT 16 -#define lpfc_eqe_resource_id_MASK 0x000000FF -#define lpfc_eqe_resource_id_WORD word0 -#define lpfc_eqe_minor_code_SHIFT 4 -#define lpfc_eqe_minor_code_MASK 0x00000FFF -#define lpfc_eqe_minor_code_WORD word0 -#define lpfc_eqe_major_code_SHIFT 1 -#define lpfc_eqe_major_code_MASK 0x00000007 -#define lpfc_eqe_major_code_WORD word0 -#define lpfc_eqe_valid_SHIFT 0 -#define lpfc_eqe_valid_MASK 0x00000001 -#define lpfc_eqe_valid_WORD word0 -}; - -/* completion queue entry structure (common fields for all cqe types) */ -struct lpfc_cqe { - uint32_t reserved0; - uint32_t reserved1; - uint32_t reserved2; - uint32_t word3; -#define lpfc_cqe_valid_SHIFT 31 -#define lpfc_cqe_valid_MASK 0x00000001 -#define lpfc_cqe_valid_WORD word3 -#define lpfc_cqe_code_SHIFT 16 -#define lpfc_cqe_code_MASK 0x000000FF -#define lpfc_cqe_code_WORD word3 -}; - -/* Completion Queue Entry Status Codes */ -#define CQE_STATUS_SUCCESS 0x0 -#define CQE_STATUS_FCP_RSP_FAILURE 0x1 -#define CQE_STATUS_REMOTE_STOP 0x2 -#define CQE_STATUS_LOCAL_REJECT 0x3 -#define CQE_STATUS_NPORT_RJT 0x4 -#define CQE_STATUS_FABRIC_RJT 0x5 -#define CQE_STATUS_NPORT_BSY 0x6 -#define CQE_STATUS_FABRIC_BSY 0x7 -#define CQE_STATUS_INTERMED_RSP 0x8 -#define CQE_STATUS_LS_RJT 0x9 -#define CQE_STATUS_CMD_REJECT 0xb -#define CQE_STATUS_FCP_TGT_LENCHECK 0xc -#define CQE_STATUS_NEED_BUFF_ENTRY 0xf - -/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). 
*/ -#define CQE_HW_STATUS_NO_ERR 0x0 -#define CQE_HW_STATUS_UNDERRUN 0x1 -#define CQE_HW_STATUS_OVERRUN 0x2 - -/* Completion Queue Entry Codes */ -#define CQE_CODE_COMPL_WQE 0x1 -#define CQE_CODE_RELEASE_WQE 0x2 -#define CQE_CODE_RECEIVE 0x4 -#define CQE_CODE_XRI_ABORTED 0x5 - -/* completion queue entry for wqe completions */ -struct lpfc_wcqe_complete { - uint32_t word0; -#define lpfc_wcqe_c_request_tag_SHIFT 16 -#define lpfc_wcqe_c_request_tag_MASK 0x0000FFFF -#define lpfc_wcqe_c_request_tag_WORD word0 -#define lpfc_wcqe_c_status_SHIFT 8 -#define lpfc_wcqe_c_status_MASK 0x000000FF -#define lpfc_wcqe_c_status_WORD word0 -#define lpfc_wcqe_c_hw_status_SHIFT 0 -#define lpfc_wcqe_c_hw_status_MASK 0x000000FF -#define lpfc_wcqe_c_hw_status_WORD word0 - uint32_t total_data_placed; - uint32_t parameter; - uint32_t word3; -#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT -#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK -#define lpfc_wcqe_c_valid_WORD lpfc_cqe_valid_WORD -#define lpfc_wcqe_c_xb_SHIFT 28 -#define lpfc_wcqe_c_xb_MASK 0x00000001 -#define lpfc_wcqe_c_xb_WORD word3 -#define lpfc_wcqe_c_pv_SHIFT 27 -#define lpfc_wcqe_c_pv_MASK 0x00000001 -#define lpfc_wcqe_c_pv_WORD word3 -#define lpfc_wcqe_c_priority_SHIFT 24 -#define lpfc_wcqe_c_priority_MASK 0x00000007 -#define lpfc_wcqe_c_priority_WORD word3 -#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT -#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK -#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD -}; - -/* completion queue entry for wqe release */ -struct lpfc_wcqe_release { - uint32_t reserved0; - uint32_t reserved1; - uint32_t word2; -#define lpfc_wcqe_r_wq_id_SHIFT 16 -#define lpfc_wcqe_r_wq_id_MASK 0x0000FFFF -#define lpfc_wcqe_r_wq_id_WORD word2 -#define lpfc_wcqe_r_wqe_index_SHIFT 0 -#define lpfc_wcqe_r_wqe_index_MASK 0x0000FFFF -#define lpfc_wcqe_r_wqe_index_WORD word2 - uint32_t word3; -#define lpfc_wcqe_r_valid_SHIFT lpfc_cqe_valid_SHIFT -#define lpfc_wcqe_r_valid_MASK lpfc_cqe_valid_MASK -#define lpfc_wcqe_r_valid_WORD lpfc_cqe_valid_WORD -#define lpfc_wcqe_r_code_SHIFT lpfc_cqe_code_SHIFT -#define lpfc_wcqe_r_code_MASK lpfc_cqe_code_MASK -#define lpfc_wcqe_r_code_WORD lpfc_cqe_code_WORD -}; - -struct sli4_wcqe_xri_aborted { - uint32_t word0; -#define lpfc_wcqe_xa_status_SHIFT 8 -#define lpfc_wcqe_xa_status_MASK 0x000000FF -#define lpfc_wcqe_xa_status_WORD word0 - uint32_t parameter; - uint32_t word2; -#define lpfc_wcqe_xa_remote_xid_SHIFT 16 -#define lpfc_wcqe_xa_remote_xid_MASK 0x0000FFFF -#define lpfc_wcqe_xa_remote_xid_WORD word2 -#define lpfc_wcqe_xa_xri_SHIFT 0 -#define lpfc_wcqe_xa_xri_MASK 0x0000FFFF -#define lpfc_wcqe_xa_xri_WORD word2 - uint32_t word3; -#define lpfc_wcqe_xa_valid_SHIFT lpfc_cqe_valid_SHIFT -#define lpfc_wcqe_xa_valid_MASK lpfc_cqe_valid_MASK -#define lpfc_wcqe_xa_valid_WORD lpfc_cqe_valid_WORD -#define lpfc_wcqe_xa_ia_SHIFT 30 -#define lpfc_wcqe_xa_ia_MASK 0x00000001 -#define lpfc_wcqe_xa_ia_WORD word3 -#define CQE_XRI_ABORTED_IA_REMOTE 0 -#define CQE_XRI_ABORTED_IA_LOCAL 1 -#define lpfc_wcqe_xa_br_SHIFT 29 -#define lpfc_wcqe_xa_br_MASK 0x00000001 -#define lpfc_wcqe_xa_br_WORD word3 -#define CQE_XRI_ABORTED_BR_BA_ACC 0 -#define CQE_XRI_ABORTED_BR_BA_RJT 1 -#define lpfc_wcqe_xa_eo_SHIFT 28 -#define lpfc_wcqe_xa_eo_MASK 0x00000001 -#define lpfc_wcqe_xa_eo_WORD word3 -#define CQE_XRI_ABORTED_EO_REMOTE 0 -#define CQE_XRI_ABORTED_EO_LOCAL 1 -#define lpfc_wcqe_xa_code_SHIFT lpfc_cqe_code_SHIFT -#define lpfc_wcqe_xa_code_MASK lpfc_cqe_code_MASK -#define lpfc_wcqe_xa_code_WORD lpfc_cqe_code_WORD -}; - 
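[Editor's note — illustrative sketch, not part of the patch: every *_SHIFT/*_MASK/*_WORD triplet in the deleted lpfc_hw4.h, including the CQE fields above, is consumed by the bf_get()/bf_set() accessor macros documented near the top of that file. The minimal, self-contained C program below shows the pattern on a completion-status field. The demo_* names are hypothetical stand-ins for the driver's real lpfc_wcqe_c_status_* defines; the two macros themselves are copied verbatim from the deleted header.]

#include <stdint.h>
#include <stdio.h>

/* bf_get()/bf_set() exactly as defined in the deleted lpfc_hw4.h */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))

/* Hypothetical stand-in for struct lpfc_wcqe_complete: the status
 * field occupies bits 15:8 of word0, mirroring lpfc_wcqe_c_status_*. */
struct demo_wcqe {
	uint32_t word0;
#define demo_status_SHIFT	8
#define demo_status_MASK	0x000000FF
#define demo_status_WORD	word0
};

int main(void)
{
	struct demo_wcqe cqe = { .word0 = 0 };

	bf_set(demo_status, &cqe, 0x3);	/* e.g. CQE_STATUS_LOCAL_REJECT */
	printf("word0  = 0x%08x\n", cqe.word0);			/* 0x00000300 */
	printf("status = 0x%x\n", bf_get(demo_status, &cqe));	/* 0x3 */
	return 0;
}

[This is why every register and queue-entry field in the file is described by exactly three #defines rather than by a C bitfield: the token-pasted accessors avoid compiler-dependent bitfield layout. End of editor's note; the patch resumes below.]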
-/* completion queue entry structure for rqe completion */ -struct lpfc_rcqe { - uint32_t word0; -#define lpfc_rcqe_bindex_SHIFT 16 -#define lpfc_rcqe_bindex_MASK 0x0000FFF -#define lpfc_rcqe_bindex_WORD word0 -#define lpfc_rcqe_status_SHIFT 8 -#define lpfc_rcqe_status_MASK 0x000000FF -#define lpfc_rcqe_status_WORD word0 -#define FC_STATUS_RQ_SUCCESS 0x10 /* Async receive successful */ -#define FC_STATUS_RQ_BUF_LEN_EXCEEDED 0x11 /* payload truncated */ -#define FC_STATUS_INSUFF_BUF_NEED_BUF 0x12 /* Insufficient buffers */ -#define FC_STATUS_INSUFF_BUF_FRM_DISC 0x13 /* Frame Discard */ - uint32_t reserved1; - uint32_t word2; -#define lpfc_rcqe_length_SHIFT 16 -#define lpfc_rcqe_length_MASK 0x0000FFFF -#define lpfc_rcqe_length_WORD word2 -#define lpfc_rcqe_rq_id_SHIFT 6 -#define lpfc_rcqe_rq_id_MASK 0x000003FF -#define lpfc_rcqe_rq_id_WORD word2 -#define lpfc_rcqe_fcf_id_SHIFT 0 -#define lpfc_rcqe_fcf_id_MASK 0x0000003F -#define lpfc_rcqe_fcf_id_WORD word2 - uint32_t word3; -#define lpfc_rcqe_valid_SHIFT lpfc_cqe_valid_SHIFT -#define lpfc_rcqe_valid_MASK lpfc_cqe_valid_MASK -#define lpfc_rcqe_valid_WORD lpfc_cqe_valid_WORD -#define lpfc_rcqe_port_SHIFT 30 -#define lpfc_rcqe_port_MASK 0x00000001 -#define lpfc_rcqe_port_WORD word3 -#define lpfc_rcqe_hdr_length_SHIFT 24 -#define lpfc_rcqe_hdr_length_MASK 0x0000001F -#define lpfc_rcqe_hdr_length_WORD word3 -#define lpfc_rcqe_code_SHIFT lpfc_cqe_code_SHIFT -#define lpfc_rcqe_code_MASK lpfc_cqe_code_MASK -#define lpfc_rcqe_code_WORD lpfc_cqe_code_WORD -#define lpfc_rcqe_eof_SHIFT 8 -#define lpfc_rcqe_eof_MASK 0x000000FF -#define lpfc_rcqe_eof_WORD word3 -#define FCOE_EOFn 0x41 -#define FCOE_EOFt 0x42 -#define FCOE_EOFni 0x49 -#define FCOE_EOFa 0x50 -#define lpfc_rcqe_sof_SHIFT 0 -#define lpfc_rcqe_sof_MASK 0x000000FF -#define lpfc_rcqe_sof_WORD word3 -#define FCOE_SOFi2 0x2d -#define FCOE_SOFi3 0x2e -#define FCOE_SOFn2 0x35 -#define FCOE_SOFn3 0x36 -}; - -struct lpfc_wqe_generic{ - struct ulp_bde64 bde; - uint32_t word3; - uint32_t word4; - uint32_t word5; - uint32_t word6; -#define lpfc_wqe_gen_context_SHIFT 16 -#define lpfc_wqe_gen_context_MASK 0x0000FFFF -#define lpfc_wqe_gen_context_WORD word6 -#define lpfc_wqe_gen_xri_SHIFT 0 -#define lpfc_wqe_gen_xri_MASK 0x0000FFFF -#define lpfc_wqe_gen_xri_WORD word6 - uint32_t word7; -#define lpfc_wqe_gen_lnk_SHIFT 23 -#define lpfc_wqe_gen_lnk_MASK 0x00000001 -#define lpfc_wqe_gen_lnk_WORD word7 -#define lpfc_wqe_gen_erp_SHIFT 22 -#define lpfc_wqe_gen_erp_MASK 0x00000001 -#define lpfc_wqe_gen_erp_WORD word7 -#define lpfc_wqe_gen_pu_SHIFT 20 -#define lpfc_wqe_gen_pu_MASK 0x00000003 -#define lpfc_wqe_gen_pu_WORD word7 -#define lpfc_wqe_gen_class_SHIFT 16 -#define lpfc_wqe_gen_class_MASK 0x00000007 -#define lpfc_wqe_gen_class_WORD word7 -#define lpfc_wqe_gen_command_SHIFT 8 -#define lpfc_wqe_gen_command_MASK 0x000000FF -#define lpfc_wqe_gen_command_WORD word7 -#define lpfc_wqe_gen_status_SHIFT 4 -#define lpfc_wqe_gen_status_MASK 0x0000000F -#define lpfc_wqe_gen_status_WORD word7 -#define lpfc_wqe_gen_ct_SHIFT 2 -#define lpfc_wqe_gen_ct_MASK 0x00000007 -#define lpfc_wqe_gen_ct_WORD word7 - uint32_t abort_tag; - uint32_t word9; -#define lpfc_wqe_gen_request_tag_SHIFT 0 -#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF -#define lpfc_wqe_gen_request_tag_WORD word9 - uint32_t word10; -#define lpfc_wqe_gen_ccp_SHIFT 24 -#define lpfc_wqe_gen_ccp_MASK 0x000000FF -#define lpfc_wqe_gen_ccp_WORD word10 -#define lpfc_wqe_gen_ccpe_SHIFT 23 -#define lpfc_wqe_gen_ccpe_MASK 0x00000001 -#define lpfc_wqe_gen_ccpe_WORD word10 
-#define lpfc_wqe_gen_pv_SHIFT 19 -#define lpfc_wqe_gen_pv_MASK 0x00000001 -#define lpfc_wqe_gen_pv_WORD word10 -#define lpfc_wqe_gen_pri_SHIFT 16 -#define lpfc_wqe_gen_pri_MASK 0x00000007 -#define lpfc_wqe_gen_pri_WORD word10 - uint32_t word11; -#define lpfc_wqe_gen_cq_id_SHIFT 16 -#define lpfc_wqe_gen_cq_id_MASK 0x000003FF -#define lpfc_wqe_gen_cq_id_WORD word11 -#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff -#define lpfc_wqe_gen_wqec_SHIFT 7 -#define lpfc_wqe_gen_wqec_MASK 0x00000001 -#define lpfc_wqe_gen_wqec_WORD word11 -#define lpfc_wqe_gen_cmd_type_SHIFT 0 -#define lpfc_wqe_gen_cmd_type_MASK 0x0000000F -#define lpfc_wqe_gen_cmd_type_WORD word11 - uint32_t payload[4]; -}; - -struct lpfc_rqe { - uint32_t address_hi; - uint32_t address_lo; -}; - -/* buffer descriptors */ -struct lpfc_bde4 { - uint32_t addr_hi; - uint32_t addr_lo; - uint32_t word2; -#define lpfc_bde4_last_SHIFT 31 -#define lpfc_bde4_last_MASK 0x00000001 -#define lpfc_bde4_last_WORD word2 -#define lpfc_bde4_sge_offset_SHIFT 0 -#define lpfc_bde4_sge_offset_MASK 0x000003FF -#define lpfc_bde4_sge_offset_WORD word2 - uint32_t word3; -#define lpfc_bde4_length_SHIFT 0 -#define lpfc_bde4_length_MASK 0x000000FF -#define lpfc_bde4_length_WORD word3 -}; - -struct lpfc_register { - uint32_t word0; -}; - -#define LPFC_UERR_STATUS_HI 0x00A4 -#define LPFC_UERR_STATUS_LO 0x00A0 -#define LPFC_ONLINE0 0x00B0 -#define LPFC_ONLINE1 0x00B4 -#define LPFC_SCRATCHPAD 0x0058 - -/* BAR0 Registers */ -#define LPFC_HST_STATE 0x00AC -#define lpfc_hst_state_perr_SHIFT 31 -#define lpfc_hst_state_perr_MASK 0x1 -#define lpfc_hst_state_perr_WORD word0 -#define lpfc_hst_state_sfi_SHIFT 30 -#define lpfc_hst_state_sfi_MASK 0x1 -#define lpfc_hst_state_sfi_WORD word0 -#define lpfc_hst_state_nip_SHIFT 29 -#define lpfc_hst_state_nip_MASK 0x1 -#define lpfc_hst_state_nip_WORD word0 -#define lpfc_hst_state_ipc_SHIFT 28 -#define lpfc_hst_state_ipc_MASK 0x1 -#define lpfc_hst_state_ipc_WORD word0 -#define lpfc_hst_state_xrom_SHIFT 27 -#define lpfc_hst_state_xrom_MASK 0x1 -#define lpfc_hst_state_xrom_WORD word0 -#define lpfc_hst_state_dl_SHIFT 26 -#define lpfc_hst_state_dl_MASK 0x1 -#define lpfc_hst_state_dl_WORD word0 -#define lpfc_hst_state_port_status_SHIFT 0 -#define lpfc_hst_state_port_status_MASK 0xFFFF -#define lpfc_hst_state_port_status_WORD word0 - -#define LPFC_POST_STAGE_POWER_ON_RESET 0x0000 -#define LPFC_POST_STAGE_AWAITING_HOST_RDY 0x0001 -#define LPFC_POST_STAGE_HOST_RDY 0x0002 -#define LPFC_POST_STAGE_BE_RESET 0x0003 -#define LPFC_POST_STAGE_SEEPROM_CS_START 0x0100 -#define LPFC_POST_STAGE_SEEPROM_CS_DONE 0x0101 -#define LPFC_POST_STAGE_DDR_CONFIG_START 0x0200 -#define LPFC_POST_STAGE_DDR_CONFIG_DONE 0x0201 -#define LPFC_POST_STAGE_DDR_CALIBRATE_START 0x0300 -#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE 0x0301 -#define LPFC_POST_STAGE_DDR_TEST_START 0x0400 -#define LPFC_POST_STAGE_DDR_TEST_DONE 0x0401 -#define LPFC_POST_STAGE_REDBOOT_INIT_START 0x0600 -#define LPFC_POST_STAGE_REDBOOT_INIT_DONE 0x0601 -#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START 0x0700 -#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE 0x0701 -#define LPFC_POST_STAGE_ARMFW_START 0x0800 -#define LPFC_POST_STAGE_DHCP_QUERY_START 0x0900 -#define LPFC_POST_STAGE_DHCP_QUERY_DONE 0x0901 -#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START 0x0A00 -#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE 0x0A01 -#define LPFC_POST_STAGE_RC_OPTION_SET 0x0B00 -#define LPFC_POST_STAGE_SWITCH_LINK 0x0B01 -#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE 0x0B02 -#define LPFC_POST_STAGE_PERFROM_TFTP 0x0B03 -#define 
LPFC_POST_STAGE_PARSE_XML 0x0B04 -#define LPFC_POST_STAGE_DOWNLOAD_IMAGE 0x0B05 -#define LPFC_POST_STAGE_FLASH_IMAGE 0x0B06 -#define LPFC_POST_STAGE_RC_DONE 0x0B07 -#define LPFC_POST_STAGE_REBOOT_SYSTEM 0x0B08 -#define LPFC_POST_STAGE_MAC_ADDRESS 0x0C00 -#define LPFC_POST_STAGE_ARMFW_READY 0xC000 -#define LPFC_POST_STAGE_ARMFW_UE 0xF000 - -#define lpfc_scratchpad_slirev_SHIFT 4 -#define lpfc_scratchpad_slirev_MASK 0xF -#define lpfc_scratchpad_slirev_WORD word0 -#define lpfc_scratchpad_chiptype_SHIFT 8 -#define lpfc_scratchpad_chiptype_MASK 0xFF -#define lpfc_scratchpad_chiptype_WORD word0 -#define lpfc_scratchpad_featurelevel1_SHIFT 16 -#define lpfc_scratchpad_featurelevel1_MASK 0xFF -#define lpfc_scratchpad_featurelevel1_WORD word0 -#define lpfc_scratchpad_featurelevel2_SHIFT 24 -#define lpfc_scratchpad_featurelevel2_MASK 0xFF -#define lpfc_scratchpad_featurelevel2_WORD word0 - -/* BAR1 Registers */ -#define LPFC_IMR_MASK_ALL 0xFFFFFFFF -#define LPFC_ISCR_CLEAR_ALL 0xFFFFFFFF - -#define LPFC_HST_ISR0 0x0C18 -#define LPFC_HST_ISR1 0x0C1C -#define LPFC_HST_ISR2 0x0C20 -#define LPFC_HST_ISR3 0x0C24 -#define LPFC_HST_ISR4 0x0C28 - -#define LPFC_HST_IMR0 0x0C48 -#define LPFC_HST_IMR1 0x0C4C -#define LPFC_HST_IMR2 0x0C50 -#define LPFC_HST_IMR3 0x0C54 -#define LPFC_HST_IMR4 0x0C58 - -#define LPFC_HST_ISCR0 0x0C78 -#define LPFC_HST_ISCR1 0x0C7C -#define LPFC_HST_ISCR2 0x0C80 -#define LPFC_HST_ISCR3 0x0C84 -#define LPFC_HST_ISCR4 0x0C88 - -#define LPFC_SLI4_INTR0 BIT0 -#define LPFC_SLI4_INTR1 BIT1 -#define LPFC_SLI4_INTR2 BIT2 -#define LPFC_SLI4_INTR3 BIT3 -#define LPFC_SLI4_INTR4 BIT4 -#define LPFC_SLI4_INTR5 BIT5 -#define LPFC_SLI4_INTR6 BIT6 -#define LPFC_SLI4_INTR7 BIT7 -#define LPFC_SLI4_INTR8 BIT8 -#define LPFC_SLI4_INTR9 BIT9 -#define LPFC_SLI4_INTR10 BIT10 -#define LPFC_SLI4_INTR11 BIT11 -#define LPFC_SLI4_INTR12 BIT12 -#define LPFC_SLI4_INTR13 BIT13 -#define LPFC_SLI4_INTR14 BIT14 -#define LPFC_SLI4_INTR15 BIT15 -#define LPFC_SLI4_INTR16 BIT16 -#define LPFC_SLI4_INTR17 BIT17 -#define LPFC_SLI4_INTR18 BIT18 -#define LPFC_SLI4_INTR19 BIT19 -#define LPFC_SLI4_INTR20 BIT20 -#define LPFC_SLI4_INTR21 BIT21 -#define LPFC_SLI4_INTR22 BIT22 -#define LPFC_SLI4_INTR23 BIT23 -#define LPFC_SLI4_INTR24 BIT24 -#define LPFC_SLI4_INTR25 BIT25 -#define LPFC_SLI4_INTR26 BIT26 -#define LPFC_SLI4_INTR27 BIT27 -#define LPFC_SLI4_INTR28 BIT28 -#define LPFC_SLI4_INTR29 BIT29 -#define LPFC_SLI4_INTR30 BIT30 -#define LPFC_SLI4_INTR31 BIT31 - -/* BAR2 Registers */ -#define LPFC_RQ_DOORBELL 0x00A0 -#define lpfc_rq_doorbell_num_posted_SHIFT 16 -#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF -#define lpfc_rq_doorbell_num_posted_WORD word0 -#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */ -#define lpfc_rq_doorbell_id_SHIFT 0 -#define lpfc_rq_doorbell_id_MASK 0x03FF -#define lpfc_rq_doorbell_id_WORD word0 - -#define LPFC_WQ_DOORBELL 0x0040 -#define lpfc_wq_doorbell_num_posted_SHIFT 24 -#define lpfc_wq_doorbell_num_posted_MASK 0x00FF -#define lpfc_wq_doorbell_num_posted_WORD word0 -#define lpfc_wq_doorbell_index_SHIFT 16 -#define lpfc_wq_doorbell_index_MASK 0x00FF -#define lpfc_wq_doorbell_index_WORD word0 -#define lpfc_wq_doorbell_id_SHIFT 0 -#define lpfc_wq_doorbell_id_MASK 0xFFFF -#define lpfc_wq_doorbell_id_WORD word0 - -#define LPFC_EQCQ_DOORBELL 0x0120 -#define lpfc_eqcq_doorbell_arm_SHIFT 29 -#define lpfc_eqcq_doorbell_arm_MASK 0x0001 -#define lpfc_eqcq_doorbell_arm_WORD word0 -#define lpfc_eqcq_doorbell_num_released_SHIFT 16 -#define lpfc_eqcq_doorbell_num_released_MASK 0x1FFF -#define 
lpfc_eqcq_doorbell_num_released_WORD word0 -#define lpfc_eqcq_doorbell_qt_SHIFT 10 -#define lpfc_eqcq_doorbell_qt_MASK 0x0001 -#define lpfc_eqcq_doorbell_qt_WORD word0 -#define LPFC_QUEUE_TYPE_COMPLETION 0 -#define LPFC_QUEUE_TYPE_EVENT 1 -#define lpfc_eqcq_doorbell_eqci_SHIFT 9 -#define lpfc_eqcq_doorbell_eqci_MASK 0x0001 -#define lpfc_eqcq_doorbell_eqci_WORD word0 -#define lpfc_eqcq_doorbell_cqid_SHIFT 0 -#define lpfc_eqcq_doorbell_cqid_MASK 0x03FF -#define lpfc_eqcq_doorbell_cqid_WORD word0 -#define lpfc_eqcq_doorbell_eqid_SHIFT 0 -#define lpfc_eqcq_doorbell_eqid_MASK 0x01FF -#define lpfc_eqcq_doorbell_eqid_WORD word0 - -#define LPFC_BMBX 0x0160 -#define lpfc_bmbx_addr_SHIFT 2 -#define lpfc_bmbx_addr_MASK 0x3FFFFFFF -#define lpfc_bmbx_addr_WORD word0 -#define lpfc_bmbx_hi_SHIFT 1 -#define lpfc_bmbx_hi_MASK 0x0001 -#define lpfc_bmbx_hi_WORD word0 -#define lpfc_bmbx_rdy_SHIFT 0 -#define lpfc_bmbx_rdy_MASK 0x0001 -#define lpfc_bmbx_rdy_WORD word0 - -#define LPFC_MQ_DOORBELL 0x0140 -#define lpfc_mq_doorbell_num_posted_SHIFT 16 -#define lpfc_mq_doorbell_num_posted_MASK 0x3FFF -#define lpfc_mq_doorbell_num_posted_WORD word0 -#define lpfc_mq_doorbell_id_SHIFT 0 -#define lpfc_mq_doorbell_id_MASK 0x03FF -#define lpfc_mq_doorbell_id_WORD word0 - -struct lpfc_sli4_cfg_mhdr { - uint32_t word1; -#define lpfc_mbox_hdr_emb_SHIFT 0 -#define lpfc_mbox_hdr_emb_MASK 0x00000001 -#define lpfc_mbox_hdr_emb_WORD word1 -#define lpfc_mbox_hdr_sge_cnt_SHIFT 3 -#define lpfc_mbox_hdr_sge_cnt_MASK 0x0000001F -#define lpfc_mbox_hdr_sge_cnt_WORD word1 - uint32_t payload_length; - uint32_t tag_lo; - uint32_t tag_hi; - uint32_t reserved5; -}; - -union lpfc_sli4_cfg_shdr { - struct { - uint32_t word6; -#define lpfc_mbox_hdr_opcode_SHIFT 0 -#define lpfc_mbox_hdr_opcode_MASK 0x000000FF -#define lpfc_mbox_hdr_opcode_WORD word6 -#define lpfc_mbox_hdr_subsystem_SHIFT 8 -#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF -#define lpfc_mbox_hdr_subsystem_WORD word6 -#define lpfc_mbox_hdr_port_number_SHIFT 16 -#define lpfc_mbox_hdr_port_number_MASK 0x000000FF -#define lpfc_mbox_hdr_port_number_WORD word6 -#define lpfc_mbox_hdr_domain_SHIFT 24 -#define lpfc_mbox_hdr_domain_MASK 0x000000FF -#define lpfc_mbox_hdr_domain_WORD word6 - uint32_t timeout; - uint32_t request_length; - uint32_t reserved9; - } request; - struct { - uint32_t word6; -#define lpfc_mbox_hdr_opcode_SHIFT 0 -#define lpfc_mbox_hdr_opcode_MASK 0x000000FF -#define lpfc_mbox_hdr_opcode_WORD word6 -#define lpfc_mbox_hdr_subsystem_SHIFT 8 -#define lpfc_mbox_hdr_subsystem_MASK 0x000000FF -#define lpfc_mbox_hdr_subsystem_WORD word6 -#define lpfc_mbox_hdr_domain_SHIFT 24 -#define lpfc_mbox_hdr_domain_MASK 0x000000FF -#define lpfc_mbox_hdr_domain_WORD word6 - uint32_t word7; -#define lpfc_mbox_hdr_status_SHIFT 0 -#define lpfc_mbox_hdr_status_MASK 0x000000FF -#define lpfc_mbox_hdr_status_WORD word7 -#define lpfc_mbox_hdr_add_status_SHIFT 8 -#define lpfc_mbox_hdr_add_status_MASK 0x000000FF -#define lpfc_mbox_hdr_add_status_WORD word7 - uint32_t response_length; - uint32_t actual_response_length; - } response; -}; - -/* Mailbox structures */ -struct mbox_header { - struct lpfc_sli4_cfg_mhdr cfg_mhdr; - union lpfc_sli4_cfg_shdr cfg_shdr; -}; - -/* Subsystem Definitions */ -#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1 -#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC - -/* Device Specific Definitions */ - -/* The HOST ENDIAN defines are in Big Endian format. 
*/ -#define HOST_ENDIAN_LOW_WORD0 0xFF3412FF -#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF - -/* Common Opcodes */ -#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C -#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D -#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15 -#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20 -#define LPFC_MBOX_OPCODE_NOP 0x21 -#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 -#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 -#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 -#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D - -/* FCoE Opcodes */ -#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01 -#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY 0x02 -#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES 0x03 -#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES 0x04 -#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE 0x05 -#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY 0x06 -#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE 0x08 -#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF 0x09 -#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A -#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B - -/* Mailbox command structures */ -struct eq_context { - uint32_t word0; -#define lpfc_eq_context_size_SHIFT 31 -#define lpfc_eq_context_size_MASK 0x00000001 -#define lpfc_eq_context_size_WORD word0 -#define LPFC_EQE_SIZE_4 0x0 -#define LPFC_EQE_SIZE_16 0x1 -#define lpfc_eq_context_valid_SHIFT 29 -#define lpfc_eq_context_valid_MASK 0x00000001 -#define lpfc_eq_context_valid_WORD word0 - uint32_t word1; -#define lpfc_eq_context_count_SHIFT 26 -#define lpfc_eq_context_count_MASK 0x00000003 -#define lpfc_eq_context_count_WORD word1 -#define LPFC_EQ_CNT_256 0x0 -#define LPFC_EQ_CNT_512 0x1 -#define LPFC_EQ_CNT_1024 0x2 -#define LPFC_EQ_CNT_2048 0x3 -#define LPFC_EQ_CNT_4096 0x4 - uint32_t word2; -#define lpfc_eq_context_delay_multi_SHIFT 13 -#define lpfc_eq_context_delay_multi_MASK 0x000003FF -#define lpfc_eq_context_delay_multi_WORD word2 - uint32_t reserved3; -}; - -struct sgl_page_pairs { - uint32_t sgl_pg0_addr_lo; - uint32_t sgl_pg0_addr_hi; - uint32_t sgl_pg1_addr_lo; - uint32_t sgl_pg1_addr_hi; -}; - -struct lpfc_mbx_post_sgl_pages { - struct mbox_header header; - uint32_t word0; -#define lpfc_post_sgl_pages_xri_SHIFT 0 -#define lpfc_post_sgl_pages_xri_MASK 0x0000FFFF -#define lpfc_post_sgl_pages_xri_WORD word0 -#define lpfc_post_sgl_pages_xricnt_SHIFT 16 -#define lpfc_post_sgl_pages_xricnt_MASK 0x0000FFFF -#define lpfc_post_sgl_pages_xricnt_WORD word0 - struct sgl_page_pairs sgl_pg_pairs[1]; -}; - -/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */ -struct lpfc_mbx_post_uembed_sgl_page1 { - union lpfc_sli4_cfg_shdr cfg_shdr; - uint32_t word0; - struct sgl_page_pairs sgl_pg_pairs; -}; - -struct lpfc_mbx_sge { - uint32_t pa_lo; - uint32_t pa_hi; - uint32_t length; -}; - -struct lpfc_mbx_nembed_cmd { - struct lpfc_sli4_cfg_mhdr cfg_mhdr; -#define LPFC_SLI4_MBX_SGE_MAX_PAGES 19 - struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES]; -}; - -struct lpfc_mbx_nembed_sge_virt { - void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES]; -}; - -struct lpfc_mbx_eq_create { - struct mbox_header header; - union { - struct { - uint32_t word0; -#define lpfc_mbx_eq_create_num_pages_SHIFT 0 -#define lpfc_mbx_eq_create_num_pages_MASK 0x0000FFFF -#define lpfc_mbx_eq_create_num_pages_WORD word0 - struct eq_context context; - struct dma_address page[LPFC_MAX_EQ_PAGE]; - } request; - struct { - uint32_t word0; -#define lpfc_mbx_eq_create_q_id_SHIFT 0 -#define lpfc_mbx_eq_create_q_id_MASK 0x0000FFFF -#define lpfc_mbx_eq_create_q_id_WORD word0 - } response; - } u; -}; - -struct lpfc_mbx_eq_destroy { - struct 
mbox_header header; - union { - struct { - uint32_t word0; -#define lpfc_mbx_eq_destroy_q_id_SHIFT 0 -#define lpfc_mbx_eq_destroy_q_id_MASK 0x0000FFFF -#define lpfc_mbx_eq_destroy_q_id_WORD word0 - } request; - struct { - uint32_t word0; - } response; - } u; -}; - -struct lpfc_mbx_nop { - struct mbox_header header; - uint32_t context[2]; -}; - -struct cq_context { - uint32_t word0; -#define lpfc_cq_context_event_SHIFT 31 -#define lpfc_cq_context_event_MASK 0x00000001 -#define lpfc_cq_context_event_WORD word0 -#define lpfc_cq_context_valid_SHIFT 29 -#define lpfc_cq_context_valid_MASK 0x00000001 -#define lpfc_cq_context_valid_WORD word0 -#define lpfc_cq_context_count_SHIFT 27 -#define lpfc_cq_context_count_MASK 0x00000003 -#define lpfc_cq_context_count_WORD word0 -#define LPFC_CQ_CNT_256 0x0 -#define LPFC_CQ_CNT_512 0x1 -#define LPFC_CQ_CNT_1024 0x2 - uint32_t word1; -#define lpfc_cq_eq_id_SHIFT 22 -#define lpfc_cq_eq_id_MASK 0x000000FF -#define lpfc_cq_eq_id_WORD word1 - uint32_t reserved0; - uint32_t reserved1; -}; - -struct lpfc_mbx_cq_create { - struct mbox_header header; - union { - struct { - uint32_t word0; -#define lpfc_mbx_cq_create_num_pages_SHIFT 0 -#define lpfc_mbx_cq_create_num_pages_MASK 0x0000FFFF -#define lpfc_mbx_cq_create_num_pages_WORD word0 - struct cq_context context; - struct dma_address page[LPFC_MAX_CQ_PAGE]; - } request; - struct { - uint32_t word0; -#define lpfc_mbx_cq_create_q_id_SHIFT 0 -#define lpfc_mbx_cq_create_q_id_MASK 0x0000FFFF -#define lpfc_mbx_cq_create_q_id_WORD word0 - } response; - } u; -}; - -struct lpfc_mbx_cq_destroy { - struct mbox_header header; - union { - struct { - uint32_t word0; -#define lpfc_mbx_cq_destroy_q_id_SHIFT 0 -#define lpfc_mbx_cq_destroy_q_id_MASK 0x0000FFFF -#define lpfc_mbx_cq_destroy_q_id_WORD word0 - } request; - struct { - uint32_t word0; - } response; - } u; -}; - -struct wq_context { - uint32_t reserved0; - uint32_t reserved1; - uint32_t reserved2; - uint32_t reserved3; -}; - -struct lpfc_mbx_wq_create { - struct mbox_header header; - union { - struct { - uint32_t word0; -#define lpfc_mbx_wq_create_num_pages_SHIFT 0 -#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF -#define lpfc_mbx_wq_create_num_pages_WORD word0 -#define lpfc_mbx_wq_create_cq_id_SHIFT 16 -#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF -#define lpfc_mbx_wq_create_cq_id_WORD word0 - struct dma_address page[LPFC_MAX_WQ_PAGE]; - } request; - struct { - uint32_t word0; -#define lpfc_mbx_wq_create_q_id_SHIFT 0 -#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF -#define lpfc_mbx_wq_create_q_id_WORD word0 - } response; - } u; -}; - -struct lpfc_mbx_wq_destroy { - struct mbox_header header; - union { - struct { - uint32_t word0; -#define lpfc_mbx_wq_destroy_q_id_SHIFT 0 -#define lpfc_mbx_wq_destroy_q_id_MASK 0x0000FFFF -#define lpfc_mbx_wq_destroy_q_id_WORD word0 - } request; - struct { - uint32_t word0; - } response; - } u; -}; - -#define LPFC_HDR_BUF_SIZE 128 -#define LPFC_DATA_BUF_SIZE 4096 -struct rq_context { - uint32_t word0; -#define lpfc_rq_context_rq_size_SHIFT 16 -#define lpfc_rq_context_rq_size_MASK 0x0000000F -#define lpfc_rq_context_rq_size_WORD word0 -#define LPFC_RQ_RING_SIZE_512 9 /* 512 entries */ -#define LPFC_RQ_RING_SIZE_1024 10 /* 1024 entries */ -#define LPFC_RQ_RING_SIZE_2048 11 /* 2048 entries */ -#define LPFC_RQ_RING_SIZE_4096 12 /* 4096 entries */ - uint32_t reserved1; - uint32_t word2; -#define lpfc_rq_context_cq_id_SHIFT 16 -#define lpfc_rq_context_cq_id_MASK 0x000003FF -#define lpfc_rq_context_cq_id_WORD word2 -#define 
lpfc_rq_context_buf_size_SHIFT 0 -#define lpfc_rq_context_buf_size_MASK 0x0000FFFF -#define lpfc_rq_context_buf_size_WORD word2 - uint32_t reserved3; -}; - -struct lpfc_mbx_rq_create { - struct mbox_header header; - union { - struct { - uint32_t word0; -#define lpfc_mbx_rq_create_num_pages_SHIFT 0 -#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF -#define lpfc_mbx_rq_create_num_pages_WORD word0 - struct rq_context context; - struct dma_address page[LPFC_MAX_WQ_PAGE]; - } request; - struct { - uint32_t word0; -#define lpfc_mbx_rq_create_q_id_SHIFT 0 -#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF -#define lpfc_mbx_rq_create_q_id_WORD word0 - } response; - } u; -}; - -struct lpfc_mbx_rq_destroy { - struct mbox_header header; - union { - struct { - uint32_t word0; -#define lpfc_mbx_rq_destroy_q_id_SHIFT 0 -#define lpfc_mbx_rq_destroy_q_id_MASK 0x0000FFFF -#define lpfc_mbx_rq_destroy_q_id_WORD word0 - } request; - struct { - uint32_t word0; - } response; - } u; -}; - -struct mq_context { - uint32_t word0; -#define lpfc_mq_context_cq_id_SHIFT 22 -#define lpfc_mq_context_cq_id_MASK 0x000003FF -#define lpfc_mq_context_cq_id_WORD word0 -#define lpfc_mq_context_count_SHIFT 16 -#define lpfc_mq_context_count_MASK 0x0000000F -#define lpfc_mq_context_count_WORD word0 -#define LPFC_MQ_CNT_16 0x5 -#define LPFC_MQ_CNT_32 0x6 -#define LPFC_MQ_CNT_64 0x7 -#define LPFC_MQ_CNT_128 0x8 - uint32_t word1; -#define lpfc_mq_context_valid_SHIFT 31 -#define lpfc_mq_context_valid_MASK 0x00000001 -#define lpfc_mq_context_valid_WORD word1 - uint32_t reserved2; - uint32_t reserved3; -}; - -struct lpfc_mbx_mq_create { - struct mbox_header header; - union { - struct { - uint32_t word0; -#define lpfc_mbx_mq_create_num_pages_SHIFT 0 -#define lpfc_mbx_mq_create_num_pages_MASK 0x0000FFFF -#define lpfc_mbx_mq_create_num_pages_WORD word0 - struct mq_context context; - struct dma_address page[LPFC_MAX_MQ_PAGE]; - } request; - struct { - uint32_t word0; -#define lpfc_mbx_mq_create_q_id_SHIFT 0 -#define lpfc_mbx_mq_create_q_id_MASK 0x0000FFFF -#define lpfc_mbx_mq_create_q_id_WORD word0 - } response; - } u; -}; - -struct lpfc_mbx_mq_destroy { - struct mbox_header header; - union { - struct { - uint32_t word0; -#define lpfc_mbx_mq_destroy_q_id_SHIFT 0 -#define lpfc_mbx_mq_destroy_q_id_MASK 0x0000FFFF -#define lpfc_mbx_mq_destroy_q_id_WORD word0 - } request; - struct { - uint32_t word0; - } response; - } u; -}; - -struct lpfc_mbx_post_hdr_tmpl { - struct mbox_header header; - uint32_t word10; -#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT 0 -#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK 0x0000FFFF -#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD word10 -#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT 16 -#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK 0x0000FFFF -#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD word10 - uint32_t rpi_paddr_lo; - uint32_t rpi_paddr_hi; -}; - -struct sli4_sge { /* SLI-4 */ - uint32_t addr_hi; - uint32_t addr_lo; - - uint32_t word2; -#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/ -#define lpfc_sli4_sge_offset_MASK 0x00FFFFFF -#define lpfc_sli4_sge_offset_WORD word2 -#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets - this flag !! 
*/ -#define lpfc_sli4_sge_last_MASK 0x00000001 -#define lpfc_sli4_sge_last_WORD word2 - uint32_t word3; -#define lpfc_sli4_sge_len_SHIFT 0 -#define lpfc_sli4_sge_len_MASK 0x0001FFFF -#define lpfc_sli4_sge_len_WORD word3 -}; - -struct fcf_record { - uint32_t max_rcv_size; - uint32_t fka_adv_period; - uint32_t fip_priority; - uint32_t word3; -#define lpfc_fcf_record_mac_0_SHIFT 0 -#define lpfc_fcf_record_mac_0_MASK 0x000000FF -#define lpfc_fcf_record_mac_0_WORD word3 -#define lpfc_fcf_record_mac_1_SHIFT 8 -#define lpfc_fcf_record_mac_1_MASK 0x000000FF -#define lpfc_fcf_record_mac_1_WORD word3 -#define lpfc_fcf_record_mac_2_SHIFT 16 -#define lpfc_fcf_record_mac_2_MASK 0x000000FF -#define lpfc_fcf_record_mac_2_WORD word3 -#define lpfc_fcf_record_mac_3_SHIFT 24 -#define lpfc_fcf_record_mac_3_MASK 0x000000FF -#define lpfc_fcf_record_mac_3_WORD word3 - uint32_t word4; -#define lpfc_fcf_record_mac_4_SHIFT 0 -#define lpfc_fcf_record_mac_4_MASK 0x000000FF -#define lpfc_fcf_record_mac_4_WORD word4 -#define lpfc_fcf_record_mac_5_SHIFT 8 -#define lpfc_fcf_record_mac_5_MASK 0x000000FF -#define lpfc_fcf_record_mac_5_WORD word4 -#define lpfc_fcf_record_fcf_avail_SHIFT 16 -#define lpfc_fcf_record_fcf_avail_MASK 0x000000FF -#define lpfc_fcf_record_fc_avail_WORD word4 -#define lpfc_fcf_record_mac_addr_prov_SHIFT 24 -#define lpfc_fcf_record_mac_addr_prov_MASK 0x000000FF -#define lpfc_fcf_record_mac_addr_prov_WORD word4 -#define LPFC_FCF_FPMA 1 /* Fabric Provided MAC Address */ -#define LPFC_FCF_SPMA 2 /* Server Provided MAC Address */ - uint32_t word5; -#define lpfc_fcf_record_fab_name_0_SHIFT 0 -#define lpfc_fcf_record_fab_name_0_MASK 0x000000FF -#define lpfc_fcf_record_fab_name_0_WORD word5 -#define lpfc_fcf_record_fab_name_1_SHIFT 8 -#define lpfc_fcf_record_fab_name_1_MASK 0x000000FF -#define lpfc_fcf_record_fab_name_1_WORD word5 -#define lpfc_fcf_record_fab_name_2_SHIFT 16 -#define lpfc_fcf_record_fab_name_2_MASK 0x000000FF -#define lpfc_fcf_record_fab_name_2_WORD word5 -#define lpfc_fcf_record_fab_name_3_SHIFT 24 -#define lpfc_fcf_record_fab_name_3_MASK 0x000000FF -#define lpfc_fcf_record_fab_name_3_WORD word5 - uint32_t word6; -#define lpfc_fcf_record_fab_name_4_SHIFT 0 -#define lpfc_fcf_record_fab_name_4_MASK 0x000000FF -#define lpfc_fcf_record_fab_name_4_WORD word6 -#define lpfc_fcf_record_fab_name_5_SHIFT 8 -#define lpfc_fcf_record_fab_name_5_MASK 0x000000FF -#define lpfc_fcf_record_fab_name_5_WORD word6 -#define lpfc_fcf_record_fab_name_6_SHIFT 16 -#define lpfc_fcf_record_fab_name_6_MASK 0x000000FF -#define lpfc_fcf_record_fab_name_6_WORD word6 -#define lpfc_fcf_record_fab_name_7_SHIFT 24 -#define lpfc_fcf_record_fab_name_7_MASK 0x000000FF -#define lpfc_fcf_record_fab_name_7_WORD word6 - uint32_t word7; -#define lpfc_fcf_record_fc_map_0_SHIFT 0 -#define lpfc_fcf_record_fc_map_0_MASK 0x000000FF -#define lpfc_fcf_record_fc_map_0_WORD word7 -#define lpfc_fcf_record_fc_map_1_SHIFT 8 -#define lpfc_fcf_record_fc_map_1_MASK 0x000000FF -#define lpfc_fcf_record_fc_map_1_WORD word7 -#define lpfc_fcf_record_fc_map_2_SHIFT 16 -#define lpfc_fcf_record_fc_map_2_MASK 0x000000FF -#define lpfc_fcf_record_fc_map_2_WORD word7 -#define lpfc_fcf_record_fcf_valid_SHIFT 24 -#define lpfc_fcf_record_fcf_valid_MASK 0x000000FF -#define lpfc_fcf_record_fcf_valid_WORD word7 - uint32_t word8; -#define lpfc_fcf_record_fcf_index_SHIFT 0 -#define lpfc_fcf_record_fcf_index_MASK 0x0000FFFF -#define lpfc_fcf_record_fcf_index_WORD word8 -#define lpfc_fcf_record_fcf_state_SHIFT 16 -#define lpfc_fcf_record_fcf_state_MASK 0x0000FFFF 
-#define lpfc_fcf_record_fcf_state_WORD word8 - uint8_t vlan_bitmap[512]; -}; - -struct lpfc_mbx_read_fcf_tbl { - union lpfc_sli4_cfg_shdr cfg_shdr; - union { - struct { - uint32_t word10; -#define lpfc_mbx_read_fcf_tbl_indx_SHIFT 0 -#define lpfc_mbx_read_fcf_tbl_indx_MASK 0x0000FFFF -#define lpfc_mbx_read_fcf_tbl_indx_WORD word10 - } request; - struct { - uint32_t eventag; - } response; - } u; - uint32_t word11; -#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT 0 -#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK 0x0000FFFF -#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD word11 -}; - -struct lpfc_mbx_add_fcf_tbl_entry { - union lpfc_sli4_cfg_shdr cfg_shdr; - uint32_t word10; -#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT 0 -#define lpfc_mbx_add_fcf_tbl_fcfi_MASK 0x0000FFFF -#define lpfc_mbx_add_fcf_tbl_fcfi_WORD word10 - struct lpfc_mbx_sge fcf_sge; -}; - -struct lpfc_mbx_del_fcf_tbl_entry { - struct mbox_header header; - uint32_t word10; -#define lpfc_mbx_del_fcf_tbl_count_SHIFT 0 -#define lpfc_mbx_del_fcf_tbl_count_MASK 0x0000FFFF -#define lpfc_mbx_del_fcf_tbl_count_WORD word10 -#define lpfc_mbx_del_fcf_tbl_index_SHIFT 16 -#define lpfc_mbx_del_fcf_tbl_index_MASK 0x0000FFFF -#define lpfc_mbx_del_fcf_tbl_index_WORD word10 -}; - -/* Status field for embedded SLI_CONFIG mailbox command */ -#define STATUS_SUCCESS 0x0 -#define STATUS_FAILED 0x1 -#define STATUS_ILLEGAL_REQUEST 0x2 -#define STATUS_ILLEGAL_FIELD 0x3 -#define STATUS_INSUFFICIENT_BUFFER 0x4 -#define STATUS_UNAUTHORIZED_REQUEST 0x5 -#define STATUS_FLASHROM_SAVE_FAILED 0x17 -#define STATUS_FLASHROM_RESTORE_FAILED 0x18 -#define STATUS_ICCBINDEX_ALLOC_FAILED 0x1a -#define STATUS_IOCTLHANDLE_ALLOC_FAILED 0x1b -#define STATUS_INVALID_PHY_ADDR_FROM_OSM 0x1c -#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM 0x1d -#define STATUS_ASSERT_FAILED 0x1e -#define STATUS_INVALID_SESSION 0x1f -#define STATUS_INVALID_CONNECTION 0x20 -#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT 0x21 -#define STATUS_BTL_NO_FREE_SLOT_PATH 0x24 -#define STATUS_BTL_NO_FREE_SLOT_TGTID 0x25 -#define STATUS_OSM_DEVSLOT_NOT_FOUND 0x26 -#define STATUS_FLASHROM_READ_FAILED 0x27 -#define STATUS_POLL_IOCTL_TIMEOUT 0x28 -#define STATUS_ERROR_ACITMAIN 0x2a -#define STATUS_REBOOT_REQUIRED 0x2c -#define STATUS_FCF_IN_USE 0x3a - -struct lpfc_mbx_sli4_config { - struct mbox_header header; -}; - -struct lpfc_mbx_init_vfi { - uint32_t word1; -#define lpfc_init_vfi_vr_SHIFT 31 -#define lpfc_init_vfi_vr_MASK 0x00000001 -#define lpfc_init_vfi_vr_WORD word1 -#define lpfc_init_vfi_vt_SHIFT 30 -#define lpfc_init_vfi_vt_MASK 0x00000001 -#define lpfc_init_vfi_vt_WORD word1 -#define lpfc_init_vfi_vf_SHIFT 29 -#define lpfc_init_vfi_vf_MASK 0x00000001 -#define lpfc_init_vfi_vf_WORD word1 -#define lpfc_init_vfi_vfi_SHIFT 0 -#define lpfc_init_vfi_vfi_MASK 0x0000FFFF -#define lpfc_init_vfi_vfi_WORD word1 - uint32_t word2; -#define lpfc_init_vfi_fcfi_SHIFT 0 -#define lpfc_init_vfi_fcfi_MASK 0x0000FFFF -#define lpfc_init_vfi_fcfi_WORD word2 - uint32_t word3; -#define lpfc_init_vfi_pri_SHIFT 13 -#define lpfc_init_vfi_pri_MASK 0x00000007 -#define lpfc_init_vfi_pri_WORD word3 -#define lpfc_init_vfi_vf_id_SHIFT 1 -#define lpfc_init_vfi_vf_id_MASK 0x00000FFF -#define lpfc_init_vfi_vf_id_WORD word3 - uint32_t word4; -#define lpfc_init_vfi_hop_count_SHIFT 24 -#define lpfc_init_vfi_hop_count_MASK 0x000000FF -#define lpfc_init_vfi_hop_count_WORD word4 -}; - -struct lpfc_mbx_reg_vfi { - uint32_t word1; -#define lpfc_reg_vfi_vp_SHIFT 28 -#define lpfc_reg_vfi_vp_MASK 0x00000001 -#define lpfc_reg_vfi_vp_WORD word1 -#define 
lpfc_reg_vfi_vfi_SHIFT 0 -#define lpfc_reg_vfi_vfi_MASK 0x0000FFFF -#define lpfc_reg_vfi_vfi_WORD word1 - uint32_t word2; -#define lpfc_reg_vfi_vpi_SHIFT 16 -#define lpfc_reg_vfi_vpi_MASK 0x0000FFFF -#define lpfc_reg_vfi_vpi_WORD word2 -#define lpfc_reg_vfi_fcfi_SHIFT 0 -#define lpfc_reg_vfi_fcfi_MASK 0x0000FFFF -#define lpfc_reg_vfi_fcfi_WORD word2 - uint32_t word3_rsvd; - uint32_t word4_rsvd; - struct ulp_bde64 bde; - uint32_t word8_rsvd; - uint32_t word9_rsvd; - uint32_t word10; -#define lpfc_reg_vfi_nport_id_SHIFT 0 -#define lpfc_reg_vfi_nport_id_MASK 0x00FFFFFF -#define lpfc_reg_vfi_nport_id_WORD word10 -}; - -struct lpfc_mbx_init_vpi { - uint32_t word1; -#define lpfc_init_vpi_vfi_SHIFT 16 -#define lpfc_init_vpi_vfi_MASK 0x0000FFFF -#define lpfc_init_vpi_vfi_WORD word1 -#define lpfc_init_vpi_vpi_SHIFT 0 -#define lpfc_init_vpi_vpi_MASK 0x0000FFFF -#define lpfc_init_vpi_vpi_WORD word1 -}; - -struct lpfc_mbx_read_vpi { - uint32_t word1_rsvd; - uint32_t word2; -#define lpfc_mbx_read_vpi_vnportid_SHIFT 0 -#define lpfc_mbx_read_vpi_vnportid_MASK 0x00FFFFFF -#define lpfc_mbx_read_vpi_vnportid_WORD word2 - uint32_t word3_rsvd; - uint32_t word4; -#define lpfc_mbx_read_vpi_acq_alpa_SHIFT 0 -#define lpfc_mbx_read_vpi_acq_alpa_MASK 0x000000FF -#define lpfc_mbx_read_vpi_acq_alpa_WORD word4 -#define lpfc_mbx_read_vpi_pb_SHIFT 15 -#define lpfc_mbx_read_vpi_pb_MASK 0x00000001 -#define lpfc_mbx_read_vpi_pb_WORD word4 -#define lpfc_mbx_read_vpi_spec_alpa_SHIFT 16 -#define lpfc_mbx_read_vpi_spec_alpa_MASK 0x000000FF -#define lpfc_mbx_read_vpi_spec_alpa_WORD word4 -#define lpfc_mbx_read_vpi_ns_SHIFT 30 -#define lpfc_mbx_read_vpi_ns_MASK 0x00000001 -#define lpfc_mbx_read_vpi_ns_WORD word4 -#define lpfc_mbx_read_vpi_hl_SHIFT 31 -#define lpfc_mbx_read_vpi_hl_MASK 0x00000001 -#define lpfc_mbx_read_vpi_hl_WORD word4 - uint32_t word5_rsvd; - uint32_t word6; -#define lpfc_mbx_read_vpi_vpi_SHIFT 0 -#define lpfc_mbx_read_vpi_vpi_MASK 0x0000FFFF -#define lpfc_mbx_read_vpi_vpi_WORD word6 - uint32_t word7; -#define lpfc_mbx_read_vpi_mac_0_SHIFT 0 -#define lpfc_mbx_read_vpi_mac_0_MASK 0x000000FF -#define lpfc_mbx_read_vpi_mac_0_WORD word7 -#define lpfc_mbx_read_vpi_mac_1_SHIFT 8 -#define lpfc_mbx_read_vpi_mac_1_MASK 0x000000FF -#define lpfc_mbx_read_vpi_mac_1_WORD word7 -#define lpfc_mbx_read_vpi_mac_2_SHIFT 16 -#define lpfc_mbx_read_vpi_mac_2_MASK 0x000000FF -#define lpfc_mbx_read_vpi_mac_2_WORD word7 -#define lpfc_mbx_read_vpi_mac_3_SHIFT 24 -#define lpfc_mbx_read_vpi_mac_3_MASK 0x000000FF -#define lpfc_mbx_read_vpi_mac_3_WORD word7 - uint32_t word8; -#define lpfc_mbx_read_vpi_mac_4_SHIFT 0 -#define lpfc_mbx_read_vpi_mac_4_MASK 0x000000FF -#define lpfc_mbx_read_vpi_mac_4_WORD word8 -#define lpfc_mbx_read_vpi_mac_5_SHIFT 8 -#define lpfc_mbx_read_vpi_mac_5_MASK 0x000000FF -#define lpfc_mbx_read_vpi_mac_5_WORD word8 -#define lpfc_mbx_read_vpi_vlan_tag_SHIFT 16 -#define lpfc_mbx_read_vpi_vlan_tag_MASK 0x00000FFF -#define lpfc_mbx_read_vpi_vlan_tag_WORD word8 -#define lpfc_mbx_read_vpi_vv_SHIFT 28 -#define lpfc_mbx_read_vpi_vv_MASK 0x0000001 -#define lpfc_mbx_read_vpi_vv_WORD word8 -}; - -struct lpfc_mbx_unreg_vfi { - uint32_t word1_rsvd; - uint32_t word2; -#define lpfc_unreg_vfi_vfi_SHIFT 0 -#define lpfc_unreg_vfi_vfi_MASK 0x0000FFFF -#define lpfc_unreg_vfi_vfi_WORD word2 -}; - -struct lpfc_mbx_resume_rpi { - uint32_t word1; -#define lpfc_resume_rpi_rpi_SHIFT 0 -#define lpfc_resume_rpi_rpi_MASK 0x0000FFFF -#define lpfc_resume_rpi_rpi_WORD word1 - uint32_t event_tag; - uint32_t word3_rsvd; - uint32_t word4_rsvd; - 
uint32_t word5_rsvd; - uint32_t word6; -#define lpfc_resume_rpi_vpi_SHIFT 0 -#define lpfc_resume_rpi_vpi_MASK 0x0000FFFF -#define lpfc_resume_rpi_vpi_WORD word6 -#define lpfc_resume_rpi_vfi_SHIFT 16 -#define lpfc_resume_rpi_vfi_MASK 0x0000FFFF -#define lpfc_resume_rpi_vfi_WORD word6 -}; - -#define REG_FCF_INVALID_QID 0xFFFF -struct lpfc_mbx_reg_fcfi { - uint32_t word1; -#define lpfc_reg_fcfi_info_index_SHIFT 0 -#define lpfc_reg_fcfi_info_index_MASK 0x0000FFFF -#define lpfc_reg_fcfi_info_index_WORD word1 -#define lpfc_reg_fcfi_fcfi_SHIFT 16 -#define lpfc_reg_fcfi_fcfi_MASK 0x0000FFFF -#define lpfc_reg_fcfi_fcfi_WORD word1 - uint32_t word2; -#define lpfc_reg_fcfi_rq_id1_SHIFT 0 -#define lpfc_reg_fcfi_rq_id1_MASK 0x0000FFFF -#define lpfc_reg_fcfi_rq_id1_WORD word2 -#define lpfc_reg_fcfi_rq_id0_SHIFT 16 -#define lpfc_reg_fcfi_rq_id0_MASK 0x0000FFFF -#define lpfc_reg_fcfi_rq_id0_WORD word2 - uint32_t word3; -#define lpfc_reg_fcfi_rq_id3_SHIFT 0 -#define lpfc_reg_fcfi_rq_id3_MASK 0x0000FFFF -#define lpfc_reg_fcfi_rq_id3_WORD word3 -#define lpfc_reg_fcfi_rq_id2_SHIFT 16 -#define lpfc_reg_fcfi_rq_id2_MASK 0x0000FFFF -#define lpfc_reg_fcfi_rq_id2_WORD word3 - uint32_t word4; -#define lpfc_reg_fcfi_type_match0_SHIFT 24 -#define lpfc_reg_fcfi_type_match0_MASK 0x000000FF -#define lpfc_reg_fcfi_type_match0_WORD word4 -#define lpfc_reg_fcfi_type_mask0_SHIFT 16 -#define lpfc_reg_fcfi_type_mask0_MASK 0x000000FF -#define lpfc_reg_fcfi_type_mask0_WORD word4 -#define lpfc_reg_fcfi_rctl_match0_SHIFT 8 -#define lpfc_reg_fcfi_rctl_match0_MASK 0x000000FF -#define lpfc_reg_fcfi_rctl_match0_WORD word4 -#define lpfc_reg_fcfi_rctl_mask0_SHIFT 0 -#define lpfc_reg_fcfi_rctl_mask0_MASK 0x000000FF -#define lpfc_reg_fcfi_rctl_mask0_WORD word4 - uint32_t word5; -#define lpfc_reg_fcfi_type_match1_SHIFT 24 -#define lpfc_reg_fcfi_type_match1_MASK 0x000000FF -#define lpfc_reg_fcfi_type_match1_WORD word5 -#define lpfc_reg_fcfi_type_mask1_SHIFT 16 -#define lpfc_reg_fcfi_type_mask1_MASK 0x000000FF -#define lpfc_reg_fcfi_type_mask1_WORD word5 -#define lpfc_reg_fcfi_rctl_match1_SHIFT 8 -#define lpfc_reg_fcfi_rctl_match1_MASK 0x000000FF -#define lpfc_reg_fcfi_rctl_match1_WORD word5 -#define lpfc_reg_fcfi_rctl_mask1_SHIFT 0 -#define lpfc_reg_fcfi_rctl_mask1_MASK 0x000000FF -#define lpfc_reg_fcfi_rctl_mask1_WORD word5 - uint32_t word6; -#define lpfc_reg_fcfi_type_match2_SHIFT 24 -#define lpfc_reg_fcfi_type_match2_MASK 0x000000FF -#define lpfc_reg_fcfi_type_match2_WORD word6 -#define lpfc_reg_fcfi_type_mask2_SHIFT 16 -#define lpfc_reg_fcfi_type_mask2_MASK 0x000000FF -#define lpfc_reg_fcfi_type_mask2_WORD word6 -#define lpfc_reg_fcfi_rctl_match2_SHIFT 8 -#define lpfc_reg_fcfi_rctl_match2_MASK 0x000000FF -#define lpfc_reg_fcfi_rctl_match2_WORD word6 -#define lpfc_reg_fcfi_rctl_mask2_SHIFT 0 -#define lpfc_reg_fcfi_rctl_mask2_MASK 0x000000FF -#define lpfc_reg_fcfi_rctl_mask2_WORD word6 - uint32_t word7; -#define lpfc_reg_fcfi_type_match3_SHIFT 24 -#define lpfc_reg_fcfi_type_match3_MASK 0x000000FF -#define lpfc_reg_fcfi_type_match3_WORD word7 -#define lpfc_reg_fcfi_type_mask3_SHIFT 16 -#define lpfc_reg_fcfi_type_mask3_MASK 0x000000FF -#define lpfc_reg_fcfi_type_mask3_WORD word7 -#define lpfc_reg_fcfi_rctl_match3_SHIFT 8 -#define lpfc_reg_fcfi_rctl_match3_MASK 0x000000FF -#define lpfc_reg_fcfi_rctl_match3_WORD word7 -#define lpfc_reg_fcfi_rctl_mask3_SHIFT 0 -#define lpfc_reg_fcfi_rctl_mask3_MASK 0x000000FF -#define lpfc_reg_fcfi_rctl_mask3_WORD word7 - uint32_t word8; -#define lpfc_reg_fcfi_mam_SHIFT 13 -#define lpfc_reg_fcfi_mam_MASK 
0x00000003 -#define lpfc_reg_fcfi_mam_WORD word8 -#define LPFC_MAM_BOTH 0 /* Both SPMA and FPMA */ -#define LPFC_MAM_SPMA 1 /* Server Provided MAC Address */ -#define LPFC_MAM_FPMA 2 /* Fabric Provided MAC Address */ -#define lpfc_reg_fcfi_vv_SHIFT 12 -#define lpfc_reg_fcfi_vv_MASK 0x00000001 -#define lpfc_reg_fcfi_vv_WORD word8 -#define lpfc_reg_fcfi_vlan_tag_SHIFT 0 -#define lpfc_reg_fcfi_vlan_tag_MASK 0x00000FFF -#define lpfc_reg_fcfi_vlan_tag_WORD word8 -}; - -struct lpfc_mbx_unreg_fcfi { - uint32_t word1_rsv; - uint32_t word2; -#define lpfc_unreg_fcfi_SHIFT 0 -#define lpfc_unreg_fcfi_MASK 0x0000FFFF -#define lpfc_unreg_fcfi_WORD word2 -}; - -struct lpfc_mbx_read_rev { - uint32_t word1; -#define lpfc_mbx_rd_rev_sli_lvl_SHIFT 16 -#define lpfc_mbx_rd_rev_sli_lvl_MASK 0x0000000F -#define lpfc_mbx_rd_rev_sli_lvl_WORD word1 -#define lpfc_mbx_rd_rev_fcoe_SHIFT 20 -#define lpfc_mbx_rd_rev_fcoe_MASK 0x00000001 -#define lpfc_mbx_rd_rev_fcoe_WORD word1 -#define lpfc_mbx_rd_rev_vpd_SHIFT 29 -#define lpfc_mbx_rd_rev_vpd_MASK 0x00000001 -#define lpfc_mbx_rd_rev_vpd_WORD word1 - uint32_t first_hw_rev; - uint32_t second_hw_rev; - uint32_t word4_rsvd; - uint32_t third_hw_rev; - uint32_t word6; -#define lpfc_mbx_rd_rev_fcph_low_SHIFT 0 -#define lpfc_mbx_rd_rev_fcph_low_MASK 0x000000FF -#define lpfc_mbx_rd_rev_fcph_low_WORD word6 -#define lpfc_mbx_rd_rev_fcph_high_SHIFT 8 -#define lpfc_mbx_rd_rev_fcph_high_MASK 0x000000FF -#define lpfc_mbx_rd_rev_fcph_high_WORD word6 -#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT 16 -#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK 0x000000FF -#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD word6 -#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT 24 -#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK 0x000000FF -#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD word6 - uint32_t word7_rsvd; - uint32_t fw_id_rev; - uint8_t fw_name[16]; - uint32_t ulp_fw_id_rev; - uint8_t ulp_fw_name[16]; - uint32_t word18_47_rsvd[30]; - uint32_t word48; -#define lpfc_mbx_rd_rev_avail_len_SHIFT 0 -#define lpfc_mbx_rd_rev_avail_len_MASK 0x00FFFFFF -#define lpfc_mbx_rd_rev_avail_len_WORD word48 - uint32_t vpd_paddr_low; - uint32_t vpd_paddr_high; - uint32_t avail_vpd_len; - uint32_t rsvd_52_63[12]; -}; - -struct lpfc_mbx_read_config { - uint32_t word1; -#define lpfc_mbx_rd_conf_max_bbc_SHIFT 0 -#define lpfc_mbx_rd_conf_max_bbc_MASK 0x000000FF -#define lpfc_mbx_rd_conf_max_bbc_WORD word1 -#define lpfc_mbx_rd_conf_init_bbc_SHIFT 8 -#define lpfc_mbx_rd_conf_init_bbc_MASK 0x000000FF -#define lpfc_mbx_rd_conf_init_bbc_WORD word1 - uint32_t word2; -#define lpfc_mbx_rd_conf_nport_did_SHIFT 0 -#define lpfc_mbx_rd_conf_nport_did_MASK 0x00FFFFFF -#define lpfc_mbx_rd_conf_nport_did_WORD word2 -#define lpfc_mbx_rd_conf_topology_SHIFT 24 -#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF -#define lpfc_mbx_rd_conf_topology_WORD word2 - uint32_t word3; -#define lpfc_mbx_rd_conf_ao_SHIFT 0 -#define lpfc_mbx_rd_conf_ao_MASK 0x00000001 -#define lpfc_mbx_rd_conf_ao_WORD word3 -#define lpfc_mbx_rd_conf_bb_scn_SHIFT 8 -#define lpfc_mbx_rd_conf_bb_scn_MASK 0x0000000F -#define lpfc_mbx_rd_conf_bb_scn_WORD word3 -#define lpfc_mbx_rd_conf_cbb_scn_SHIFT 12 -#define lpfc_mbx_rd_conf_cbb_scn_MASK 0x0000000F -#define lpfc_mbx_rd_conf_cbb_scn_WORD word3 -#define lpfc_mbx_rd_conf_mc_SHIFT 29 -#define lpfc_mbx_rd_conf_mc_MASK 0x00000001 -#define lpfc_mbx_rd_conf_mc_WORD word3 - uint32_t word4; -#define lpfc_mbx_rd_conf_e_d_tov_SHIFT 0 -#define lpfc_mbx_rd_conf_e_d_tov_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_e_d_tov_WORD word4 - uint32_t word5; -#define 
lpfc_mbx_rd_conf_lp_tov_SHIFT 0 -#define lpfc_mbx_rd_conf_lp_tov_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_lp_tov_WORD word5 - uint32_t word6; -#define lpfc_mbx_rd_conf_r_a_tov_SHIFT 0 -#define lpfc_mbx_rd_conf_r_a_tov_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_r_a_tov_WORD word6 - uint32_t word7; -#define lpfc_mbx_rd_conf_r_t_tov_SHIFT 0 -#define lpfc_mbx_rd_conf_r_t_tov_MASK 0x000000FF -#define lpfc_mbx_rd_conf_r_t_tov_WORD word7 - uint32_t word8; -#define lpfc_mbx_rd_conf_al_tov_SHIFT 0 -#define lpfc_mbx_rd_conf_al_tov_MASK 0x0000000F -#define lpfc_mbx_rd_conf_al_tov_WORD word8 - uint32_t word9; -#define lpfc_mbx_rd_conf_lmt_SHIFT 0 -#define lpfc_mbx_rd_conf_lmt_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_lmt_WORD word9 - uint32_t word10; -#define lpfc_mbx_rd_conf_max_alpa_SHIFT 0 -#define lpfc_mbx_rd_conf_max_alpa_MASK 0x000000FF -#define lpfc_mbx_rd_conf_max_alpa_WORD word10 - uint32_t word11_rsvd; - uint32_t word12; -#define lpfc_mbx_rd_conf_xri_base_SHIFT 0 -#define lpfc_mbx_rd_conf_xri_base_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_xri_base_WORD word12 -#define lpfc_mbx_rd_conf_xri_count_SHIFT 16 -#define lpfc_mbx_rd_conf_xri_count_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_xri_count_WORD word12 - uint32_t word13; -#define lpfc_mbx_rd_conf_rpi_base_SHIFT 0 -#define lpfc_mbx_rd_conf_rpi_base_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_rpi_base_WORD word13 -#define lpfc_mbx_rd_conf_rpi_count_SHIFT 16 -#define lpfc_mbx_rd_conf_rpi_count_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_rpi_count_WORD word13 - uint32_t word14; -#define lpfc_mbx_rd_conf_vpi_base_SHIFT 0 -#define lpfc_mbx_rd_conf_vpi_base_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_vpi_base_WORD word14 -#define lpfc_mbx_rd_conf_vpi_count_SHIFT 16 -#define lpfc_mbx_rd_conf_vpi_count_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_vpi_count_WORD word14 - uint32_t word15; -#define lpfc_mbx_rd_conf_vfi_base_SHIFT 0 -#define lpfc_mbx_rd_conf_vfi_base_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_vfi_base_WORD word15 -#define lpfc_mbx_rd_conf_vfi_count_SHIFT 16 -#define lpfc_mbx_rd_conf_vfi_count_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_vfi_count_WORD word15 - uint32_t word16; -#define lpfc_mbx_rd_conf_fcfi_base_SHIFT 0 -#define lpfc_mbx_rd_conf_fcfi_base_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_fcfi_base_WORD word16 -#define lpfc_mbx_rd_conf_fcfi_count_SHIFT 16 -#define lpfc_mbx_rd_conf_fcfi_count_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_fcfi_count_WORD word16 - uint32_t word17; -#define lpfc_mbx_rd_conf_rq_count_SHIFT 0 -#define lpfc_mbx_rd_conf_rq_count_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_rq_count_WORD word17 -#define lpfc_mbx_rd_conf_eq_count_SHIFT 16 -#define lpfc_mbx_rd_conf_eq_count_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_eq_count_WORD word17 - uint32_t word18; -#define lpfc_mbx_rd_conf_wq_count_SHIFT 0 -#define lpfc_mbx_rd_conf_wq_count_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_wq_count_WORD word18 -#define lpfc_mbx_rd_conf_cq_count_SHIFT 16 -#define lpfc_mbx_rd_conf_cq_count_MASK 0x0000FFFF -#define lpfc_mbx_rd_conf_cq_count_WORD word18 -}; - -struct lpfc_mbx_request_features { - uint32_t word1; -#define lpfc_mbx_rq_ftr_qry_SHIFT 0 -#define lpfc_mbx_rq_ftr_qry_MASK 0x00000001 -#define lpfc_mbx_rq_ftr_qry_WORD word1 - uint32_t word2; -#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT 0 -#define lpfc_mbx_rq_ftr_rq_iaab_MASK 0x00000001 -#define lpfc_mbx_rq_ftr_rq_iaab_WORD word2 -#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT 1 -#define lpfc_mbx_rq_ftr_rq_npiv_MASK 0x00000001 -#define lpfc_mbx_rq_ftr_rq_npiv_WORD word2 -#define lpfc_mbx_rq_ftr_rq_dif_SHIFT 
2 -#define lpfc_mbx_rq_ftr_rq_dif_MASK 0x00000001 -#define lpfc_mbx_rq_ftr_rq_dif_WORD word2 -#define lpfc_mbx_rq_ftr_rq_vf_SHIFT 3 -#define lpfc_mbx_rq_ftr_rq_vf_MASK 0x00000001 -#define lpfc_mbx_rq_ftr_rq_vf_WORD word2 -#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT 4 -#define lpfc_mbx_rq_ftr_rq_fcpi_MASK 0x00000001 -#define lpfc_mbx_rq_ftr_rq_fcpi_WORD word2 -#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT 5 -#define lpfc_mbx_rq_ftr_rq_fcpt_MASK 0x00000001 -#define lpfc_mbx_rq_ftr_rq_fcpt_WORD word2 -#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT 6 -#define lpfc_mbx_rq_ftr_rq_fcpc_MASK 0x00000001 -#define lpfc_mbx_rq_ftr_rq_fcpc_WORD word2 -#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT 7 -#define lpfc_mbx_rq_ftr_rq_ifip_MASK 0x00000001 -#define lpfc_mbx_rq_ftr_rq_ifip_WORD word2 - uint32_t word3; -#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT 0 -#define lpfc_mbx_rq_ftr_rsp_iaab_MASK 0x00000001 -#define lpfc_mbx_rq_ftr_rsp_iaab_WORD word3 -#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT 1 -#define lpfc_mbx_rq_ftr_rsp_npiv_MASK 0x00000001 -#define lpfc_mbx_rq_ftr_rsp_npiv_WORD word3 -#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT 2 -#define lpfc_mbx_rq_ftr_rsp_dif_MASK 0x00000001 -#define lpfc_mbx_rq_ftr_rsp_dif_WORD word3 -#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT 3 -#define lpfc_mbx_rq_ftr_rsp_vf__MASK 0x00000001 -#define lpfc_mbx_rq_ftr_rsp_vf_WORD word3 -#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT 4 -#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK 0x00000001 -#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD word3 -#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT 5 -#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK 0x00000001 -#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD word3 -#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT 6 -#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK 0x00000001 -#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD word3 -#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT 7 -#define lpfc_mbx_rq_ftr_rsp_ifip_MASK 0x00000001 -#define lpfc_mbx_rq_ftr_rsp_ifip_WORD word3 -}; - -/* Mailbox Completion Queue Error Messages */ -#define MB_CQE_STATUS_SUCCESS 0x0 -#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 -#define MB_CQE_STATUS_INVALID_PARAMETER 0x2 -#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3 -#define MB_CEQ_STATUS_QUEUE_FLUSHING 0x4 -#define MB_CQE_STATUS_DMA_FAILED 0x5 - -/* mailbox queue entry structure */ -struct lpfc_mqe { - uint32_t word0; -#define lpfc_mqe_status_SHIFT 16 -#define lpfc_mqe_status_MASK 0x0000FFFF -#define lpfc_mqe_status_WORD word0 -#define lpfc_mqe_command_SHIFT 8 -#define lpfc_mqe_command_MASK 0x000000FF -#define lpfc_mqe_command_WORD word0 - union { - uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1]; - /* sli4 mailbox commands */ - struct lpfc_mbx_sli4_config sli4_config; - struct lpfc_mbx_init_vfi init_vfi; - struct lpfc_mbx_reg_vfi reg_vfi; - struct lpfc_mbx_reg_vfi unreg_vfi; - struct lpfc_mbx_init_vpi init_vpi; - struct lpfc_mbx_resume_rpi resume_rpi; - struct lpfc_mbx_read_fcf_tbl read_fcf_tbl; - struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry; - struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry; - struct lpfc_mbx_reg_fcfi reg_fcfi; - struct lpfc_mbx_unreg_fcfi unreg_fcfi; - struct lpfc_mbx_mq_create mq_create; - struct lpfc_mbx_eq_create eq_create; - struct lpfc_mbx_cq_create cq_create; - struct lpfc_mbx_wq_create wq_create; - struct lpfc_mbx_rq_create rq_create; - struct lpfc_mbx_mq_destroy mq_destroy; - struct lpfc_mbx_eq_destroy eq_destroy; - struct lpfc_mbx_cq_destroy cq_destroy; - struct lpfc_mbx_wq_destroy wq_destroy; - struct lpfc_mbx_rq_destroy rq_destroy; - struct lpfc_mbx_post_sgl_pages post_sgl_pages; - struct lpfc_mbx_nembed_cmd nembed_cmd; - struct lpfc_mbx_read_rev read_rev; - 
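
For context: REQUEST_FEATURES is a negotiation, with word2 carrying the bits the driver requests and word3 the bits the port actually grants. A hedged user-space sketch of the resulting check; the FTR_* values mirror the rq_ftr shifts above, and the helper name is invented:

/* Illustration only; not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define FTR_NPIV	(1u << 1)	/* mirrors lpfc_mbx_rq_ftr_rq_npiv_SHIFT */
#define FTR_DIF		(1u << 2)	/* mirrors lpfc_mbx_rq_ftr_rq_dif_SHIFT */

static int feature_granted(uint32_t requested, uint32_t response, uint32_t bit)
{
	/* Usable only if we asked for it and the port echoed it back. */
	return (requested & bit) && (response & bit);
}

int main(void)
{
	uint32_t rq = FTR_NPIV | FTR_DIF;	/* what the driver requested */
	uint32_t rsp = FTR_NPIV;		/* what the port granted */

	printf("npiv=%d dif=%d\n",
	       feature_granted(rq, rsp, FTR_NPIV),
	       feature_granted(rq, rsp, FTR_DIF));
	return 0;
}
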
struct lpfc_mbx_read_vpi read_vpi; - struct lpfc_mbx_read_config rd_config; - struct lpfc_mbx_request_features req_ftrs; - struct lpfc_mbx_post_hdr_tmpl hdr_tmpl; - struct lpfc_mbx_nop nop; - } un; -}; - -struct lpfc_mcqe { - uint32_t word0; -#define lpfc_mcqe_status_SHIFT 0 -#define lpfc_mcqe_status_MASK 0x0000FFFF -#define lpfc_mcqe_status_WORD word0 -#define lpfc_mcqe_ext_status_SHIFT 16 -#define lpfc_mcqe_ext_status_MASK 0x0000FFFF -#define lpfc_mcqe_ext_status_WORD word0 - uint32_t mcqe_tag0; - uint32_t mcqe_tag1; - uint32_t trailer; -#define lpfc_trailer_valid_SHIFT 31 -#define lpfc_trailer_valid_MASK 0x00000001 -#define lpfc_trailer_valid_WORD trailer -#define lpfc_trailer_async_SHIFT 30 -#define lpfc_trailer_async_MASK 0x00000001 -#define lpfc_trailer_async_WORD trailer -#define lpfc_trailer_hpi_SHIFT 29 -#define lpfc_trailer_hpi_MASK 0x00000001 -#define lpfc_trailer_hpi_WORD trailer -#define lpfc_trailer_completed_SHIFT 28 -#define lpfc_trailer_completed_MASK 0x00000001 -#define lpfc_trailer_completed_WORD trailer -#define lpfc_trailer_consumed_SHIFT 27 -#define lpfc_trailer_consumed_MASK 0x00000001 -#define lpfc_trailer_consumed_WORD trailer -#define lpfc_trailer_type_SHIFT 16 -#define lpfc_trailer_type_MASK 0x000000FF -#define lpfc_trailer_type_WORD trailer -#define lpfc_trailer_code_SHIFT 8 -#define lpfc_trailer_code_MASK 0x000000FF -#define lpfc_trailer_code_WORD trailer -#define LPFC_TRAILER_CODE_LINK 0x1 -#define LPFC_TRAILER_CODE_FCOE 0x2 -#define LPFC_TRAILER_CODE_DCBX 0x3 -}; - -struct lpfc_acqe_link { - uint32_t word0; -#define lpfc_acqe_link_speed_SHIFT 24 -#define lpfc_acqe_link_speed_MASK 0x000000FF -#define lpfc_acqe_link_speed_WORD word0 -#define LPFC_ASYNC_LINK_SPEED_ZERO 0x0 -#define LPFC_ASYNC_LINK_SPEED_10MBPS 0x1 -#define LPFC_ASYNC_LINK_SPEED_100MBPS 0x2 -#define LPFC_ASYNC_LINK_SPEED_1GBPS 0x3 -#define LPFC_ASYNC_LINK_SPEED_10GBPS 0x4 -#define lpfc_acqe_link_duplex_SHIFT 16 -#define lpfc_acqe_link_duplex_MASK 0x000000FF -#define lpfc_acqe_link_duplex_WORD word0 -#define LPFC_ASYNC_LINK_DUPLEX_NONE 0x0 -#define LPFC_ASYNC_LINK_DUPLEX_HALF 0x1 -#define LPFC_ASYNC_LINK_DUPLEX_FULL 0x2 -#define lpfc_acqe_link_status_SHIFT 8 -#define lpfc_acqe_link_status_MASK 0x000000FF -#define lpfc_acqe_link_status_WORD word0 -#define LPFC_ASYNC_LINK_STATUS_DOWN 0x0 -#define LPFC_ASYNC_LINK_STATUS_UP 0x1 -#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN 0x2 -#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP 0x3 -#define lpfc_acqe_link_physical_SHIFT 0 -#define lpfc_acqe_link_physical_MASK 0x000000FF -#define lpfc_acqe_link_physical_WORD word0 -#define LPFC_ASYNC_LINK_PORT_A 0x0 -#define LPFC_ASYNC_LINK_PORT_B 0x1 - uint32_t word1; -#define lpfc_acqe_link_fault_SHIFT 0 -#define lpfc_acqe_link_fault_MASK 0x000000FF -#define lpfc_acqe_link_fault_WORD word1 -#define LPFC_ASYNC_LINK_FAULT_NONE 0x0 -#define LPFC_ASYNC_LINK_FAULT_LOCAL 0x1 -#define LPFC_ASYNC_LINK_FAULT_REMOTE 0x2 - uint32_t event_tag; - uint32_t trailer; -}; - -struct lpfc_acqe_fcoe { - uint32_t fcf_index; - uint32_t word1; -#define lpfc_acqe_fcoe_fcf_count_SHIFT 0 -#define lpfc_acqe_fcoe_fcf_count_MASK 0x0000FFFF -#define lpfc_acqe_fcoe_fcf_count_WORD word1 -#define lpfc_acqe_fcoe_event_type_SHIFT 16 -#define lpfc_acqe_fcoe_event_type_MASK 0x0000FFFF -#define lpfc_acqe_fcoe_event_type_WORD word1 -#define LPFC_FCOE_EVENT_TYPE_NEW_FCF 0x1 -#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL 0x2 -#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD 0x3 - uint32_t event_tag; - uint32_t trailer; -}; - -struct lpfc_acqe_dcbx { - uint32_t tlv_ttl; - uint32_t 
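
For context: the lpfc_mcqe trailer word above packs a valid bit (31), an async bit (30) and an event code in bits 15:8 (LINK/FCOE/DCBX). A small sketch of decoding such a trailer, with demo macros restating those positions:

/* Illustration only; not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define TRAILER_VALID	(1u << 31)
#define TRAILER_ASYNC	(1u << 30)
#define TRAILER_CODE(t)	(((t) >> 8) & 0xFF)

#define CODE_LINK 0x1
#define CODE_FCOE 0x2
#define CODE_DCBX 0x3

static const char *decode_trailer(uint32_t trailer)
{
	if (!(trailer & TRAILER_VALID))
		return "not valid";
	if (!(trailer & TRAILER_ASYNC))
		return "synchronous completion";
	switch (TRAILER_CODE(trailer)) {
	case CODE_LINK: return "async link event";
	case CODE_FCOE: return "async fcoe event";
	case CODE_DCBX: return "async dcbx event";
	default:	return "unknown async event";
	}
}

int main(void)
{
	uint32_t t = TRAILER_VALID | TRAILER_ASYNC | (CODE_FCOE << 8);

	printf("%s\n", decode_trailer(t));
	return 0;
}
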
reserved; - uint32_t event_tag; - uint32_t trailer; -}; - -/* - * Define the bootstrap mailbox (bmbx) region used to communicate - * mailbox command between the host and port. The mailbox consists - * of a payload area of 256 bytes and a completion queue of length - * 16 bytes. - */ -struct lpfc_bmbx_create { - struct lpfc_mqe mqe; - struct lpfc_mcqe mcqe; -}; - -#define SGL_ALIGN_SZ 64 -#define SGL_PAGE_SIZE 4096 -/* align SGL addr on a size boundary - adjust address up */ -#define NO_XRI ((uint16_t)-1) -struct wqe_common { - uint32_t word6; -#define wqe_xri_SHIFT 0 -#define wqe_xri_MASK 0x0000FFFF -#define wqe_xri_WORD word6 -#define wqe_ctxt_tag_SHIFT 16 -#define wqe_ctxt_tag_MASK 0x0000FFFF -#define wqe_ctxt_tag_WORD word6 - uint32_t word7; -#define wqe_ct_SHIFT 2 -#define wqe_ct_MASK 0x00000003 -#define wqe_ct_WORD word7 -#define wqe_status_SHIFT 4 -#define wqe_status_MASK 0x0000000f -#define wqe_status_WORD word7 -#define wqe_cmnd_SHIFT 8 -#define wqe_cmnd_MASK 0x000000ff -#define wqe_cmnd_WORD word7 -#define wqe_class_SHIFT 16 -#define wqe_class_MASK 0x00000007 -#define wqe_class_WORD word7 -#define wqe_pu_SHIFT 20 -#define wqe_pu_MASK 0x00000003 -#define wqe_pu_WORD word7 -#define wqe_erp_SHIFT 22 -#define wqe_erp_MASK 0x00000001 -#define wqe_erp_WORD word7 -#define wqe_lnk_SHIFT 23 -#define wqe_lnk_MASK 0x00000001 -#define wqe_lnk_WORD word7 -#define wqe_tmo_SHIFT 24 -#define wqe_tmo_MASK 0x000000ff -#define wqe_tmo_WORD word7 - uint32_t abort_tag; /* word 8 in WQE */ - uint32_t word9; -#define wqe_reqtag_SHIFT 0 -#define wqe_reqtag_MASK 0x0000FFFF -#define wqe_reqtag_WORD word9 -#define wqe_rcvoxid_SHIFT 16 -#define wqe_rcvoxid_MASK 0x0000FFFF -#define wqe_rcvoxid_WORD word9 - uint32_t word10; -#define wqe_pri_SHIFT 16 -#define wqe_pri_MASK 0x00000007 -#define wqe_pri_WORD word10 -#define wqe_pv_SHIFT 19 -#define wqe_pv_MASK 0x00000001 -#define wqe_pv_WORD word10 -#define wqe_xc_SHIFT 21 -#define wqe_xc_MASK 0x00000001 -#define wqe_xc_WORD word10 -#define wqe_ccpe_SHIFT 23 -#define wqe_ccpe_MASK 0x00000001 -#define wqe_ccpe_WORD word10 -#define wqe_ccp_SHIFT 24 -#define wqe_ccp_MASK 0x000000ff -#define wqe_ccp_WORD word10 - uint32_t word11; -#define wqe_cmd_type_SHIFT 0 -#define wqe_cmd_type_MASK 0x0000000f -#define wqe_cmd_type_WORD word11 -#define wqe_wqec_SHIFT 7 -#define wqe_wqec_MASK 0x00000001 -#define wqe_wqec_WORD word11 -#define wqe_cqid_SHIFT 16 -#define wqe_cqid_MASK 0x000003ff -#define wqe_cqid_WORD word11 -}; - -struct wqe_did { - uint32_t word5; -#define wqe_els_did_SHIFT 0 -#define wqe_els_did_MASK 0x00FFFFFF -#define wqe_els_did_WORD word5 -#define wqe_xmit_bls_ar_SHIFT 30 -#define wqe_xmit_bls_ar_MASK 0x00000001 -#define wqe_xmit_bls_ar_WORD word5 -#define wqe_xmit_bls_xo_SHIFT 31 -#define wqe_xmit_bls_xo_MASK 0x00000001 -#define wqe_xmit_bls_xo_WORD word5 -}; - -struct els_request64_wqe { - struct ulp_bde64 bde; - uint32_t payload_len; - uint32_t word4; -#define els_req64_sid_SHIFT 0 -#define els_req64_sid_MASK 0x00FFFFFF -#define els_req64_sid_WORD word4 -#define els_req64_sp_SHIFT 24 -#define els_req64_sp_MASK 0x00000001 -#define els_req64_sp_WORD word4 -#define els_req64_vf_SHIFT 25 -#define els_req64_vf_MASK 0x00000001 -#define els_req64_vf_WORD word4 - struct wqe_did wqe_dest; - struct wqe_common wqe_com; /* words 6-11 */ - uint32_t word12; -#define els_req64_vfid_SHIFT 1 -#define els_req64_vfid_MASK 0x00000FFF -#define els_req64_vfid_WORD word12 -#define els_req64_pri_SHIFT 13 -#define els_req64_pri_MASK 0x00000007 -#define els_req64_pri_WORD word12 - 
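
For context: every WQE variant in this header reserves words 6-11 for struct wqe_common, which is what lets the 16-word union declared a little further down (union lpfc_wqe) overlay one raw view and many typed views of the same 64 bytes. A compact sketch of that union-of-views layout, using invented demo_* types:

/* Illustration only; not part of the patch. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_wqe_common {		/* stands in for words 6-11 */
	uint32_t word6, word7, abort_tag, word9, word10, word11;
};

struct demo_els_req {
	uint32_t words0_5[6];		/* command-specific lead-in */
	struct demo_wqe_common com;	/* shared words 6-11 */
	uint32_t words12_15[4];
};

union demo_wqe {
	uint32_t words[16];		/* raw view handed to the queue */
	struct demo_els_req els_req;	/* typed view for one command */
};

int main(void)
{
	union demo_wqe w;

	memset(&w, 0, sizeof(w));
	w.els_req.com.abort_tag = 0xdeadbeef;	/* word 8 in every variant */
	printf("word[8]=0x%08x\n", w.words[8]);
	return 0;
}
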
uint32_t word13; -#define els_req64_hopcnt_SHIFT 24 -#define els_req64_hopcnt_MASK 0x000000ff -#define els_req64_hopcnt_WORD word13 - uint32_t reserved[2]; -}; - -struct xmit_els_rsp64_wqe { - struct ulp_bde64 bde; - uint32_t rsvd3; - uint32_t rsvd4; - struct wqe_did wqe_dest; - struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; -}; - -struct xmit_bls_rsp64_wqe { - uint32_t payload0; - uint32_t word1; -#define xmit_bls_rsp64_rxid_SHIFT 0 -#define xmit_bls_rsp64_rxid_MASK 0x0000ffff -#define xmit_bls_rsp64_rxid_WORD word1 -#define xmit_bls_rsp64_oxid_SHIFT 16 -#define xmit_bls_rsp64_oxid_MASK 0x0000ffff -#define xmit_bls_rsp64_oxid_WORD word1 - uint32_t word2; -#define xmit_bls_rsp64_seqcntlo_SHIFT 0 -#define xmit_bls_rsp64_seqcntlo_MASK 0x0000ffff -#define xmit_bls_rsp64_seqcntlo_WORD word2 -#define xmit_bls_rsp64_seqcnthi_SHIFT 16 -#define xmit_bls_rsp64_seqcnthi_MASK 0x0000ffff -#define xmit_bls_rsp64_seqcnthi_WORD word2 - uint32_t rsrvd3; - uint32_t rsrvd4; - struct wqe_did wqe_dest; - struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; -}; -struct wqe_rctl_dfctl { - uint32_t word5; -#define wqe_si_SHIFT 2 -#define wqe_si_MASK 0x000000001 -#define wqe_si_WORD word5 -#define wqe_la_SHIFT 3 -#define wqe_la_MASK 0x000000001 -#define wqe_la_WORD word5 -#define wqe_ls_SHIFT 7 -#define wqe_ls_MASK 0x000000001 -#define wqe_ls_WORD word5 -#define wqe_dfctl_SHIFT 8 -#define wqe_dfctl_MASK 0x0000000ff -#define wqe_dfctl_WORD word5 -#define wqe_type_SHIFT 16 -#define wqe_type_MASK 0x0000000ff -#define wqe_type_WORD word5 -#define wqe_rctl_SHIFT 24 -#define wqe_rctl_MASK 0x0000000ff -#define wqe_rctl_WORD word5 -}; - -struct xmit_seq64_wqe { - struct ulp_bde64 bde; - uint32_t paylaod_offset; - uint32_t relative_offset; - struct wqe_rctl_dfctl wge_ctl; - struct wqe_common wqe_com; /* words 6-11 */ - /* Note: word10 different REVISIT */ - uint32_t xmit_len; - uint32_t rsvd_12_15[3]; -}; -struct xmit_bcast64_wqe { - struct ulp_bde64 bde; - uint32_t paylaod_len; - uint32_t rsvd4; - struct wqe_rctl_dfctl wge_ctl; /* word 5 */ - struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; -}; - -struct gen_req64_wqe { - struct ulp_bde64 bde; - uint32_t command_len; - uint32_t payload_len; - struct wqe_rctl_dfctl wge_ctl; /* word 5 */ - struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; -}; - -struct create_xri_wqe { - uint32_t rsrvd[5]; /* words 0-4 */ - struct wqe_did wqe_dest; /* word 5 */ - struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; /* word 12-15 */ -}; - -#define T_REQUEST_TAG 3 -#define T_XRI_TAG 1 - -struct abort_cmd_wqe { - uint32_t rsrvd[3]; - uint32_t word3; -#define abort_cmd_ia_SHIFT 0 -#define abort_cmd_ia_MASK 0x000000001 -#define abort_cmd_ia_WORD word3 -#define abort_cmd_criteria_SHIFT 8 -#define abort_cmd_criteria_MASK 0x0000000ff -#define abort_cmd_criteria_WORD word3 - uint32_t rsrvd4; - uint32_t rsrvd5; - struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; /* word 12-15 */ -}; - -struct fcp_iwrite64_wqe { - struct ulp_bde64 bde; - uint32_t payload_len; - uint32_t total_xfer_len; - uint32_t initial_xfer_len; - struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; /* word 12-15 */ -}; - -struct fcp_iread64_wqe { - struct ulp_bde64 bde; - uint32_t payload_len; /* word 3 */ - uint32_t total_xfer_len; /* word 4 */ - uint32_t rsrvd5; /* word 5 */ - struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; /* word 12-15 */ -}; - -struct fcp_icmnd64_wqe { - 
struct ulp_bde64 bde; /* words 0-2 */ - uint32_t rsrvd[3]; /* words 3-5 */ - struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; /* word 12-15 */ -}; - - -union lpfc_wqe { - uint32_t words[16]; - struct lpfc_wqe_generic generic; - struct fcp_icmnd64_wqe fcp_icmd; - struct fcp_iread64_wqe fcp_iread; - struct fcp_iwrite64_wqe fcp_iwrite; - struct abort_cmd_wqe abort_cmd; - struct create_xri_wqe create_xri; - struct xmit_bcast64_wqe xmit_bcast64; - struct xmit_seq64_wqe xmit_sequence; - struct xmit_bls_rsp64_wqe xmit_bls_rsp; - struct xmit_els_rsp64_wqe xmit_els_rsp; - struct els_request64_wqe els_req; - struct gen_req64_wqe gen_req; -}; - -#define FCP_COMMAND 0x0 -#define FCP_COMMAND_DATA_OUT 0x1 -#define ELS_COMMAND_NON_FIP 0xC -#define ELS_COMMAND_FIP 0xD -#define OTHER_COMMAND 0x8 - diff --git a/trunk/drivers/scsi/lpfc/lpfc_init.c b/trunk/drivers/scsi/lpfc/lpfc_init.c index 2f5907f92eea..86d1bdcbf2d8 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_init.c +++ b/trunk/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -34,10 +34,8 @@ #include #include -#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" -#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -53,23 +51,9 @@ char *_dump_buf_dif; unsigned long _dump_buf_dif_order; spinlock_t _dump_buf_lock; +static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); -static int lpfc_sli4_queue_create(struct lpfc_hba *); -static void lpfc_sli4_queue_destroy(struct lpfc_hba *); -static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); -static int lpfc_setup_endian_order(struct lpfc_hba *); -static int lpfc_sli4_read_config(struct lpfc_hba *); -static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *); -static void lpfc_free_sgl_list(struct lpfc_hba *); -static int lpfc_init_sgl_list(struct lpfc_hba *); -static int lpfc_init_active_sgl_array(struct lpfc_hba *); -static void lpfc_free_active_sgl(struct lpfc_hba *); -static int lpfc_hba_down_post_s3(struct lpfc_hba *phba); -static int lpfc_hba_down_post_s4(struct lpfc_hba *phba); -static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *); -static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *); -static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *); static struct scsi_transport_template *lpfc_transport_template = NULL; static struct scsi_transport_template *lpfc_vport_transport_template = NULL; @@ -108,7 +92,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba) return -ENOMEM; } - mb = &pmb->u.mb; + mb = &pmb->mb; phba->link_state = LPFC_INIT_MBX_CMDS; if (lpfc_is_LC_HBA(phba->pcidev->device)) { @@ -221,11 +205,6 @@ lpfc_config_port_prep(struct lpfc_hba *phba) mb->mbxCommand, mb->mbxStatus); mb->un.varDmp.word_cnt = 0; } - /* dump mem may return a zero when finished or we got a - * mailbox error, either way we are done. 
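
For context on the hunk above, which drops the word_cnt == 0 early exit from the VPD dump loop as part of the revert: a chunked copy loop of this shape needs both a zero-length guard to terminate and a clamp to the destination size to stay in bounds. A user-space sketch, where dump_chunk() is an invented stand-in for the mailbox dump:

/* Illustration only; not part of the patch. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VPD_SIZE 16	/* stands in for DMP_VPD_SIZE */

/* Fake "dump" that returns up to 4 bytes per call, then runs dry. */
static uint32_t dump_chunk(uint8_t *dst, uint32_t off)
{
	static const uint8_t vpd[10] = "VPD-DATA!";
	uint32_t n = (off < sizeof(vpd)) ? sizeof(vpd) - off : 0;

	if (n > 4)
		n = 4;
	memcpy(dst, vpd + off, n);
	return n;		/* 0 means the device is finished */
}

int main(void)
{
	uint8_t buf[VPD_SIZE];
	uint32_t off = 0, n;

	while (off < VPD_SIZE) {
		n = dump_chunk(buf + off, off);
		if (n == 0)		/* done (or error): stop copying */
			break;
		if (n > VPD_SIZE - off)	/* clamp, as the driver clamps */
			n = VPD_SIZE - off;
		off += n;
	}
	printf("read %u bytes: %s\n", off, buf);
	return 0;
}
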
- */ - if (mb->un.varDmp.word_cnt == 0) - break; if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset) mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset; lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, @@ -254,7 +233,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba) static void lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) { - if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) + if (pmboxq->mb.mbxStatus == MBX_SUCCESS) phba->temp_sensor_support = 1; else phba->temp_sensor_support = 0; @@ -281,7 +260,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) /* character array used for decoding dist type. */ char dist_char[] = "nabx"; - if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) { + if (pmboxq->mb.mbxStatus != MBX_SUCCESS) { mempool_free(pmboxq, phba->mbox_mem_pool); return; } @@ -289,7 +268,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) prg = (struct prog_id *) &prog_id_word; /* word 7 contain option rom version */ - prog_id_word = pmboxq->u.mb.un.varWords[7]; + prog_id_word = pmboxq->mb.un.varWords[7]; /* Decode the Option rom version word to a readable string */ if (prg->dist < 4) @@ -346,7 +325,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) phba->link_state = LPFC_HBA_ERROR; return -ENOMEM; } - mb = &pmb->u.mb; + mb = &pmb->mb; /* Get login parameters for NID. */ lpfc_read_sparam(phba, pmb, 0); @@ -385,7 +364,6 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Update the fc_host data structures with new wwn. */ fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); - fc_host_max_npiv_vports(shost) = phba->max_vpi; /* If no serial number in VPD data, use low 6 bytes of WWNN */ /* This should be consolidated into parse_vpd ? - mr */ @@ -482,18 +460,17 @@ lpfc_config_port_post(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, "0352 Config MSI mailbox command " "failed, mbxCmd x%x, mbxStatus x%x\n", - pmb->u.mb.mbxCommand, - pmb->u.mb.mbxStatus); + pmb->mb.mbxCommand, pmb->mb.mbxStatus); mempool_free(pmb, phba->mbox_mem_pool); return -EIO; } } - spin_lock_irq(&phba->hbalock); /* Initialize ERATT handling flag */ phba->hba_flag &= ~HBA_ERATT_HANDLED; /* Enable appropriate host interrupts */ + spin_lock_irq(&phba->hbalock); status = readl(phba->HCregaddr); status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA; if (psli->num_rings > 0) @@ -594,20 +571,16 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) { struct lpfc_vport **vports; int i; - - if (phba->sli_rev <= LPFC_SLI_REV3) { - /* Disable interrupts */ - writel(0, phba->HCregaddr); - readl(phba->HCregaddr); /* flush */ - } + /* Disable interrupts */ + writel(0, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ if (phba->pport->load_flag & FC_UNLOADING) lpfc_cleanup_discovery_resources(phba->pport); else { vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 0; i <= phba->max_vports && - vports[i] != NULL; i++) + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) lpfc_cleanup_discovery_resources(vports[i]); lpfc_destroy_vport_work_array(phba, vports); } @@ -615,7 +588,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) } /** - * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset + * lpfc_hba_down_post - Perform lpfc uninitialization after HBA reset * @phba: pointer to lpfc HBA data structure. 
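
For context: several hunks here switch pmb->u.mb back to pmb->mb, undoing the union that let one LPFC_MBOXQ_t carry either the SLI-3 MAILBOX_t or the SLI-4 MQE. A minimal sketch of that shared-element shape, with invented demo types standing in for the real mailbox structures:

/* Illustration only; not part of the patch. */
#include <stdint.h>
#include <stdio.h>

struct demo_mailbox_s3 { uint32_t mbxCommand, mbxStatus; };
struct demo_mqe_s4     { uint32_t word0, payload[3]; };

/* One queue-element type serving both generations of hardware. */
struct demo_mboxq {
	union {
		struct demo_mailbox_s3 mb;	/* SLI-3 view */
		struct demo_mqe_s4 mqe;	/* SLI-4 view */
	} u;
};

int main(void)
{
	struct demo_mboxq q = { 0 };

	q.u.mb.mbxStatus = 0;			/* stands in for MBX_SUCCESS */
	printf("status=%u\n", q.u.mb.mbxStatus);
	return 0;
}
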
* * This routine will do uninitialization after the HBA is reset when bring @@ -625,8 +598,8 @@ lpfc_hba_down_prep(struct lpfc_hba *phba) * 0 - sucess. * Any other value - error. **/ -static int -lpfc_hba_down_post_s3(struct lpfc_hba *phba) +int +lpfc_hba_down_post(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; @@ -669,77 +642,6 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba) return 0; } -/** - * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset - * @phba: pointer to lpfc HBA data structure. - * - * This routine will do uninitialization after the HBA is reset when bring - * down the SLI Layer. - * - * Return codes - * 0 - sucess. - * Any other value - error. - **/ -static int -lpfc_hba_down_post_s4(struct lpfc_hba *phba) -{ - struct lpfc_scsi_buf *psb, *psb_next; - LIST_HEAD(aborts); - int ret; - unsigned long iflag = 0; - ret = lpfc_hba_down_post_s3(phba); - if (ret) - return ret; - /* At this point in time the HBA is either reset or DOA. Either - * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be - * on the lpfc_sgl_list so that it can either be freed if the - * driver is unloading or reposted if the driver is restarting - * the port. - */ - spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */ - /* scsl_buf_list */ - /* abts_sgl_list_lock required because worker thread uses this - * list. - */ - spin_lock(&phba->sli4_hba.abts_sgl_list_lock); - list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, - &phba->sli4_hba.lpfc_sgl_list); - spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); - /* abts_scsi_buf_list_lock required because worker thread uses this - * list. - */ - spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock); - list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list, - &aborts); - spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock); - spin_unlock_irq(&phba->hbalock); - - list_for_each_entry_safe(psb, psb_next, &aborts, list) { - psb->pCmd = NULL; - psb->status = IOSTAT_SUCCESS; - } - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_splice(&aborts, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); - return 0; -} - -/** - * lpfc_hba_down_post - Wrapper func for hba down post routine - * @phba: pointer to lpfc HBA data structure. - * - * This routine wraps the actual SLI3 or SLI4 routine for performing - * uninitialization after the HBA is reset when bring down the SLI Layer. - * - * Return codes - * 0 - sucess. - * Any other value - error. 
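
For context on the removed lpfc_hba_down_post_s4 above: the pattern is to detach the aborted-buffer list in one step while holding the lock, scrub each entry outside the lock, then splice the whole batch back into the free pool. A user-space approximation with a plain singly linked list and a pthread mutex; all names are invented, and the kernel code does this with list_splice_init() and spinlocks:

/* Illustration only; not part of the patch. */
#include <pthread.h>
#include <stdio.h>

#define NBUF 4

struct demo_buf { int id; int status; struct demo_buf *next; };

static struct demo_buf *abts_list;	/* buffers caught mid-abort */
static struct demo_buf *free_list;	/* general allocation pool */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_down_post(void)
{
	struct demo_buf *aborts, *p;

	/* Detach the whole aborted list in one step, under the lock. */
	pthread_mutex_lock(&list_lock);
	aborts = abts_list;
	abts_list = NULL;
	pthread_mutex_unlock(&list_lock);

	/* Scrub each buffer outside the lock... */
	for (p = aborts; p; p = p->next)
		p->status = 0;		/* stands in for IOSTAT_SUCCESS */

	/* ...then splice the batch onto the free pool, under the lock. */
	pthread_mutex_lock(&list_lock);
	for (p = aborts; p && p->next; p = p->next)
		;
	if (p) {
		p->next = free_list;
		free_list = aborts;
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	static struct demo_buf bufs[NBUF];
	struct demo_buf *p;
	int i;

	for (i = 0; i < NBUF; i++) {
		bufs[i].id = i;
		bufs[i].status = -1;
		bufs[i].next = abts_list;
		abts_list = &bufs[i];
	}
	demo_down_post();
	for (p = free_list; p; p = p->next)
		printf("buf %d status %d\n", p->id, p->status);
	return 0;
}
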
- **/ -int -lpfc_hba_down_post(struct lpfc_hba *phba) -{ - return (*phba->lpfc_hba_down_post)(phba); -} /** * lpfc_hb_timeout - The HBA-timer timeout handler @@ -907,7 +809,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) "taking this port offline.\n"); spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI_ACTIVE; + psli->sli_flag &= ~LPFC_SLI2_ACTIVE; spin_unlock_irq(&phba->hbalock); lpfc_offline_prep(phba); @@ -932,15 +834,13 @@ lpfc_offline_eratt(struct lpfc_hba *phba) struct lpfc_sli *psli = &phba->sli; spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI_ACTIVE; + psli->sli_flag &= ~LPFC_SLI2_ACTIVE; spin_unlock_irq(&phba->hbalock); lpfc_offline_prep(phba); lpfc_offline(phba); lpfc_reset_barrier(phba); - spin_lock_irq(&phba->hbalock); lpfc_sli_brdreset(phba); - spin_unlock_irq(&phba->hbalock); lpfc_hba_down_post(phba); lpfc_sli_brdready(phba, HS_MBRDY); lpfc_unblock_mgmt_io(phba); @@ -948,25 +848,6 @@ lpfc_offline_eratt(struct lpfc_hba *phba) return; } -/** - * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention - * @phba: pointer to lpfc hba data structure. - * - * This routine is called to bring a SLI4 HBA offline when HBA hardware error - * other than Port Error 6 has been detected. - **/ -static void -lpfc_sli4_offline_eratt(struct lpfc_hba *phba) -{ - lpfc_offline_prep(phba); - lpfc_offline(phba); - lpfc_sli4_brdreset(phba); - lpfc_hba_down_post(phba); - lpfc_sli4_post_status_check(phba); - lpfc_unblock_mgmt_io(phba); - phba->link_state = LPFC_HBA_ERROR; -} - /** * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler * @phba: pointer to lpfc hba data structure. @@ -983,16 +864,6 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) struct lpfc_sli_ring *pring; struct lpfc_sli *psli = &phba->sli; - /* If the pci channel is offline, ignore possible errors, - * since we cannot communicate with the pci card anyway. - */ - if (pci_channel_offline(phba->pcidev)) { - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~DEFER_ERATT; - spin_unlock_irq(&phba->hbalock); - return; - } - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0479 Deferred Adapter Hardware Error " "Data: x%x x%x x%x\n", @@ -1000,7 +871,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) phba->work_status[0], phba->work_status[1]); spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI_ACTIVE; + psli->sli_flag &= ~LPFC_SLI2_ACTIVE; spin_unlock_irq(&phba->hbalock); @@ -1038,30 +909,13 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba) if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING))) phba->work_hs = old_host_status & ~HS_FFER1; - spin_lock_irq(&phba->hbalock); phba->hba_flag &= ~DEFER_ERATT; - spin_unlock_irq(&phba->hbalock); phba->work_status[0] = readl(phba->MBslimaddr + 0xa8); phba->work_status[1] = readl(phba->MBslimaddr + 0xac); } -static void -lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) -{ - struct lpfc_board_event_header board_event; - struct Scsi_Host *shost; - - board_event.event_type = FC_REG_BOARD_EVENT; - board_event.subcategory = LPFC_EVENT_PORTINTERR; - shost = lpfc_shost_from_vport(phba->pport); - fc_host_post_vendor_event(shost, fc_get_event_number(), - sizeof(board_event), - (char *) &board_event, - LPFC_NL_VENDOR_ID); -} - /** - * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler + * lpfc_handle_eratt - The HBA hardware error handler * @phba: pointer to lpfc hba data structure. 
* * This routine is invoked to handle the following HBA hardware error @@ -1070,8 +924,8 @@ lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba) * 2 - DMA ring index out of range * 3 - Mailbox command came back as unknown **/ -static void -lpfc_handle_eratt_s3(struct lpfc_hba *phba) +void +lpfc_handle_eratt(struct lpfc_hba *phba) { struct lpfc_vport *vport = phba->pport; struct lpfc_sli *psli = &phba->sli; @@ -1080,23 +934,24 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) unsigned long temperature; struct temp_event temp_event_data; struct Scsi_Host *shost; + struct lpfc_board_event_header board_event; /* If the pci channel is offline, ignore possible errors, - * since we cannot communicate with the pci card anyway. - */ - if (pci_channel_offline(phba->pcidev)) { - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~DEFER_ERATT; - spin_unlock_irq(&phba->hbalock); + * since we cannot communicate with the pci card anyway. */ + if (pci_channel_offline(phba->pcidev)) return; - } - /* If resets are disabled then leave the HBA alone and return */ if (!phba->cfg_enable_hba_reset) return; /* Send an internal error event to mgmt application */ - lpfc_board_errevt_to_mgmt(phba); + board_event.event_type = FC_REG_BOARD_EVENT; + board_event.subcategory = LPFC_EVENT_PORTINTERR; + shost = lpfc_shost_from_vport(phba->pport); + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(board_event), + (char *) &board_event, + LPFC_NL_VENDOR_ID); if (phba->hba_flag & DEFER_ERATT) lpfc_handle_deferred_eratt(phba); @@ -1110,7 +965,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) phba->work_status[0], phba->work_status[1]); spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI_ACTIVE; + psli->sli_flag &= ~LPFC_SLI2_ACTIVE; spin_unlock_irq(&phba->hbalock); /* @@ -1181,65 +1036,6 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba) return; } -/** - * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to handle the SLI4 HBA hardware error attention - * conditions. - **/ -static void -lpfc_handle_eratt_s4(struct lpfc_hba *phba) -{ - struct lpfc_vport *vport = phba->pport; - uint32_t event_data; - struct Scsi_Host *shost; - - /* If the pci channel is offline, ignore possible errors, since - * we cannot communicate with the pci card anyway. - */ - if (pci_channel_offline(phba->pcidev)) - return; - /* If resets are disabled then leave the HBA alone and return */ - if (!phba->cfg_enable_hba_reset) - return; - - /* Send an internal error event to mgmt application */ - lpfc_board_errevt_to_mgmt(phba); - - /* For now, the actual action for SLI4 device handling is not - * specified yet, just treated it as adaptor hardware failure - */ - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n", - phba->work_status[0], phba->work_status[1]); - - event_data = FC_REG_DUMP_EVENT; - shost = lpfc_shost_from_vport(vport); - fc_host_post_vendor_event(shost, fc_get_event_number(), - sizeof(event_data), (char *) &event_data, - SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX); - - lpfc_sli4_offline_eratt(phba); -} - -/** - * lpfc_handle_eratt - Wrapper func for handling hba error attention - * @phba: pointer to lpfc HBA data structure. - * - * This routine wraps the actual SLI3 or SLI4 hba error attention handling - * routine from the API jump table function pointer from the lpfc_hba struct. - * - * Return codes - * 0 - sucess. - * Any other value - error. 
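
For context: the wrapper routines being removed here dispatch through per-device function pointers (phba->lpfc_handle_eratt, phba->lpfc_hba_down_post, and so on) so that callers never test sli_rev themselves. A minimal sketch of that jump-table pattern with invented demo types:

/* Illustration only; not part of the patch. */
#include <stdio.h>

struct demo_hba;
typedef void (*eratt_handler_t)(struct demo_hba *);

struct demo_hba {
	int sli_rev;
	eratt_handler_t handle_eratt;	/* filled in at probe time */
};

static void handle_eratt_s3(struct demo_hba *hba)
{
	printf("SLI-%d error attention path\n", hba->sli_rev);
}

static void handle_eratt_s4(struct demo_hba *hba)
{
	printf("SLI-%d error attention path\n", hba->sli_rev);
}

/* The wrapper the rest of the driver calls; it never tests sli_rev. */
static void handle_eratt(struct demo_hba *hba)
{
	hba->handle_eratt(hba);
}

int main(void)
{
	struct demo_hba s3 = { 3, handle_eratt_s3 };
	struct demo_hba s4 = { 4, handle_eratt_s4 };

	handle_eratt(&s3);
	handle_eratt(&s4);
	return 0;
}
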
- **/ -void -lpfc_handle_eratt(struct lpfc_hba *phba) -{ - (*phba->lpfc_handle_eratt)(phba); -} - /** * lpfc_handle_latt - The HBA link event handler * @phba: pointer to lpfc hba data structure. @@ -1341,7 +1137,7 @@ lpfc_handle_latt(struct lpfc_hba *phba) * 0 - pointer to the VPD passed in is NULL * 1 - success **/ -int +static int lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) { uint8_t lenlo, lenhi; @@ -1496,7 +1292,6 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) uint16_t dev_id = phba->pcidev->device; int max_speed; int GE = 0; - int oneConnect = 0; /* default is not a oneConnect */ struct { char * name; int max_speed; @@ -1642,14 +1437,6 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) case PCI_DEVICE_ID_PROTEUS_S: m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"}; break; - case PCI_DEVICE_ID_TIGERSHARK: - oneConnect = 1; - m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"}; - break; - case PCI_DEVICE_ID_TIGERSHARK_S: - oneConnect = 1; - m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"}; - break; default: m = (typeof(m)){ NULL }; break; @@ -1657,24 +1444,13 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) if (mdp && mdp[0] == '\0') snprintf(mdp, 79,"%s", m.name); - /* oneConnect hba requires special processing, they are all initiators - * and we put the port number on the end - */ - if (descp && descp[0] == '\0') { - if (oneConnect) - snprintf(descp, 255, - "Emulex OneConnect %s, FCoE Initiator, Port %s", - m.name, - phba->Port); - else - snprintf(descp, 255, - "Emulex %s %d%s %s %s", - m.name, m.max_speed, - (GE) ? "GE" : "Gb", - m.bus, - (GE) ? "FCoE Adapter" : - "Fibre Channel Adapter"); - } + if (descp && descp[0] == '\0') + snprintf(descp, 255, + "Emulex %s %d%s %s %s", + m.name, m.max_speed, + (GE) ? "GE" : "Gb", + m.bus, + (GE) ? "FCoE Adapter" : "Fibre Channel Adapter"); } /** @@ -1757,8 +1533,7 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt) icmd->ulpCommand = CMD_QUE_RING_BUF64_CN; icmd->ulpLe = 1; - if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) == - IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) { lpfc_mbuf_free(phba, mp1->virt, mp1->phys); kfree(mp1); cnt++; @@ -1986,6 +1761,7 @@ lpfc_cleanup(struct lpfc_vport *vport) * Lets wait for this to happen, if needed. */ while (!list_empty(&vport->fc_nodes)) { + if (i++ > 3000) { lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, "0233 Nodelist not empty\n"); @@ -2006,6 +1782,7 @@ lpfc_cleanup(struct lpfc_vport *vport) /* Wait for any activity on ndlps to settle */ msleep(10); } + return; } /** @@ -2026,36 +1803,22 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport) } /** - * lpfc_stop_hba_timers - Stop all the timers associated with an HBA + * lpfc_stop_phba_timers - Stop all the timers associated with an HBA * @phba: pointer to lpfc hba data structure. * * This routine stops all the timers associated with a HBA. This function is * invoked before either putting a HBA offline or unloading the driver. 
**/ -void -lpfc_stop_hba_timers(struct lpfc_hba *phba) +static void +lpfc_stop_phba_timers(struct lpfc_hba *phba) { + del_timer_sync(&phba->fcp_poll_timer); lpfc_stop_vport_timers(phba->pport); del_timer_sync(&phba->sli.mbox_tmo); del_timer_sync(&phba->fabric_block_timer); - del_timer_sync(&phba->eratt_poll); - del_timer_sync(&phba->hb_tmofunc); phba->hb_outstanding = 0; - - switch (phba->pci_dev_grp) { - case LPFC_PCI_DEV_LP: - /* Stop any LightPulse device specific driver timers */ - del_timer_sync(&phba->fcp_poll_timer); - break; - case LPFC_PCI_DEV_OC: - /* Stop any OneConnect device sepcific driver timers */ - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0297 Invalid device group (x%x)\n", - phba->pci_dev_grp); - break; - } + del_timer_sync(&phba->hb_tmofunc); + del_timer_sync(&phba->eratt_poll); return; } @@ -2115,21 +1878,14 @@ lpfc_online(struct lpfc_hba *phba) return 1; } - if (phba->sli_rev == LPFC_SLI_REV4) { - if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */ - lpfc_unblock_mgmt_io(phba); - return 1; - } - } else { - if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ - lpfc_unblock_mgmt_io(phba); - return 1; - } + if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */ + lpfc_unblock_mgmt_io(phba); + return 1; } vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { struct Scsi_Host *shost; shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); @@ -2191,12 +1947,11 @@ lpfc_offline_prep(struct lpfc_hba * phba) /* Issue an unreg_login to all nodes on all vports */ vports = lpfc_create_vport_work_array(phba); if (vports != NULL) { - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { struct Scsi_Host *shost; if (vports[i]->load_flag & FC_UNLOADING) continue; - vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED; shost = lpfc_shost_from_vport(vports[i]); list_for_each_entry_safe(ndlp, next_ndlp, &vports[i]->fc_nodes, @@ -2220,7 +1975,7 @@ lpfc_offline_prep(struct lpfc_hba * phba) } lpfc_destroy_vport_work_array(phba, vports); - lpfc_sli_mbox_sys_shutdown(phba); + lpfc_sli_flush_mbox_queue(phba); } /** @@ -2241,11 +1996,11 @@ lpfc_offline(struct lpfc_hba *phba) if (phba->pport->fc_flag & FC_OFFLINE_MODE) return; - /* stop port and all timers associated with this hba */ - lpfc_stop_port(phba); + /* stop all timers associated with this hba */ + lpfc_stop_phba_timers(phba); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) lpfc_stop_vport_timers(vports[i]); lpfc_destroy_vport_work_array(phba, vports); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, @@ -2258,7 +2013,7 @@ lpfc_offline(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); vports[i]->work_port_events = 0; @@ -2351,10 +2106,6 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->max_lun = vport->cfg_max_luns; shost->this_id = -1; shost->max_cmd_len = 16; - if (phba->sli_rev == LPFC_SLI_REV4) { - shost->dma_boundary = 
LPFC_SLI4_MAX_SEGMENT_SIZE; - shost->sg_tablesize = phba->cfg_sg_seg_cnt; - } /* * Set initial can_queue value since 0 is no longer supported and @@ -2372,7 +2123,6 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) /* Initialize all internally managed lists. */ INIT_LIST_HEAD(&vport->fc_nodes); - INIT_LIST_HEAD(&vport->rcv_buffer_list); spin_lock_init(&vport->work_port_lock); init_timer(&vport->fc_disctmo); @@ -2564,501 +2314,192 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost) } /** - * lpfc_stop_port_s3 - Stop SLI3 device port + * lpfc_enable_msix - Enable MSI-X interrupt mode * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to stop an SLI3 device port, it stops the device - * from generating interrupts and stops the device driver's timers for the - * device. + * This routine is invoked to enable the MSI-X interrupt vectors. The kernel + * function pci_enable_msix() is called to enable the MSI-X vectors. Note that + * pci_enable_msix(), once invoked, enables either all or nothing, depending + * on the current availability of PCI vector resources. The device driver is + * responsible for calling the individual request_irq() to register each MSI-X + * vector with a interrupt handler, which is done in this function. Note that + * later when device is unloading, the driver should always call free_irq() + * on all MSI-X vectors it has done request_irq() on before calling + * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device + * will be left with MSI-X enabled and leaks its vectors. + * + * Return codes + * 0 - sucessful + * other values - error **/ -static void -lpfc_stop_port_s3(struct lpfc_hba *phba) +static int +lpfc_enable_msix(struct lpfc_hba *phba) { - /* Clear all interrupt enable conditions */ - writel(0, phba->HCregaddr); - readl(phba->HCregaddr); /* flush */ - /* Clear all pending interrupts */ - writel(0xffffffff, phba->HAregaddr); - readl(phba->HAregaddr); /* flush */ + int rc, i; + LPFC_MBOXQ_t *pmb; - /* Reset some HBA SLI setup states */ - lpfc_stop_hba_timers(phba); - phba->pport->work_port_events = 0; -} + /* Set up MSI-X multi-message vectors */ + for (i = 0; i < LPFC_MSIX_VECTORS; i++) + phba->msix_entries[i].entry = i; -/** - * lpfc_stop_port_s4 - Stop SLI4 device port - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to stop an SLI4 device port, it stops the device - * from generating interrupts and stops the device driver's timers for the - * device. - **/ -static void -lpfc_stop_port_s4(struct lpfc_hba *phba) -{ - /* Reset some HBA SLI4 setup states */ - lpfc_stop_hba_timers(phba); - phba->pport->work_port_events = 0; - phba->sli4_hba.intr_enable = 0; - /* Hard clear it for now, shall have more graceful way to wait later */ - phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; -} + /* Configure MSI-X capability structure */ + rc = pci_enable_msix(phba->pcidev, phba->msix_entries, + ARRAY_SIZE(phba->msix_entries)); + if (rc) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0420 PCI enable MSI-X failed (%d)\n", rc); + goto msi_fail_out; + } else + for (i = 0; i < LPFC_MSIX_VECTORS; i++) + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0477 MSI-X entry[%d]: vector=x%x " + "message=%d\n", i, + phba->msix_entries[i].vector, + phba->msix_entries[i].entry); + /* + * Assign MSI-X vectors to interrupt handlers + */ -/** - * lpfc_stop_port - Wrapper function for stopping hba port - * @phba: Pointer to HBA context object. 
- * - * This routine wraps the actual SLI3 or SLI4 hba stop port routine from - * the API jump table function pointer from the lpfc_hba struct. - **/ -void -lpfc_stop_port(struct lpfc_hba *phba) -{ - phba->lpfc_stop_port(phba); -} + /* vector-0 is associated to slow-path handler */ + rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler, + IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba); + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0421 MSI-X slow-path request_irq failed " + "(%d)\n", rc); + goto msi_fail_out; + } -/** - * lpfc_sli4_remove_dflt_fcf - Remove the driver default fcf record from the port. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to remove the driver default fcf record from - * the port. This routine currently acts on FCF Index 0. - * - **/ -void -lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba) -{ - int rc = 0; - LPFC_MBOXQ_t *mboxq; - struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record; - uint32_t mbox_tmo, req_len; - uint32_t shdr_status, shdr_add_status; + /* vector-1 is associated to fast-path handler */ + rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler, + IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba); - mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mboxq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2020 Failed to allocate mbox for ADD_FCF cmd\n"); - return; + if (rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0429 MSI-X fast-path request_irq failed " + "(%d)\n", rc); + goto irq_fail_out; } - req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) - - sizeof(struct lpfc_sli4_cfg_mhdr); - rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_DELETE_FCF, - req_len, LPFC_SLI4_MBX_EMBED); /* - * In phase 1, there is a single FCF index, 0. In phase2, the driver - * supports multiple FCF indices. + * Configure HBA MSI-X attention conditions to messages */ - del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry; - bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1); - bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record, - phba->fcf.fcf_indx); + pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - else { - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); - rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); - } - /* The IOCTL status is embedded in the mailbox subheader. */ - shdr_status = bf_get(lpfc_mbox_hdr_status, - &del_fcf_record->header.cfg_shdr.response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, - &del_fcf_record->header.cfg_shdr.response); - if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2516 DEL FCF of default FCF Index failed " - "mbx status x%x, status x%x add_status x%x\n", - rc, shdr_status, shdr_add_status); + if (!pmb) { + rc = -ENOMEM; + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0474 Unable to allocate memory for issuing " + "MBOX_CONFIG_MSI command\n"); + goto mem_fail_out; } - if (rc != MBX_TIMEOUT) - mempool_free(mboxq, phba->mbox_mem_pool); -} - -/** - * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code - * @phba: pointer to lpfc hba data structure. - * @acqe_link: pointer to the async link completion queue entry. - * - * This routine is to parse the SLI4 link-attention link fault code and - * translate it into the base driver's read link attention mailbox command - * status. 
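
For context: the restored lpfc_enable_msix() above unwinds through its mbx_fail_out/mem_fail_out/irq_fail_out/msi_fail_out labels, releasing resources strictly in reverse order of acquisition by falling through the labels. A runnable sketch of that goto ladder, where grab() and drop() are invented stand-ins for the PCI and IRQ calls:

/* Illustration only; not part of the patch. */
#include <stdio.h>
#include <string.h>

static int grab(const char *what)
{
	if (!strcmp(what, "mailbox memory")) {	/* force one failure */
		printf("acquire %s: failed\n", what);
		return -1;
	}
	printf("acquire %s\n", what);
	return 0;
}

static void drop(const char *what) { printf("release %s\n", what); }

static int demo_enable_msix(void)
{
	int rc;

	rc = grab("msix vectors");
	if (rc)
		goto msi_fail_out;
	rc = grab("slow-path irq");
	if (rc)
		goto msi_fail_out;
	rc = grab("fast-path irq");
	if (rc)
		goto irq_fail_out;
	rc = grab("mailbox memory");
	if (rc)
		goto mem_fail_out;
	return 0;

	/* Unwind strictly in reverse order of acquisition. */
mem_fail_out:
	drop("fast-path irq");
irq_fail_out:
	drop("slow-path irq");
msi_fail_out:
	drop("msix vectors");
	return rc;
}

int main(void)
{
	return demo_enable_msix() ? 1 : 0;
}
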
- * - * Return: Link-attention status in terms of base driver's coding. - **/ -static uint16_t -lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba, - struct lpfc_acqe_link *acqe_link) -{ - uint16_t latt_fault; - - switch (bf_get(lpfc_acqe_link_fault, acqe_link)) { - case LPFC_ASYNC_LINK_FAULT_NONE: - case LPFC_ASYNC_LINK_FAULT_LOCAL: - case LPFC_ASYNC_LINK_FAULT_REMOTE: - latt_fault = 0; - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0398 Invalid link fault code: x%x\n", - bf_get(lpfc_acqe_link_fault, acqe_link)); - latt_fault = MBXERR_ERROR; - break; + rc = lpfc_config_msi(phba, pmb); + if (rc) + goto mbx_fail_out; + rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); + if (rc != MBX_SUCCESS) { + lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, + "0351 Config MSI mailbox command failed, " + "mbxCmd x%x, mbxStatus x%x\n", + pmb->mb.mbxCommand, pmb->mb.mbxStatus); + goto mbx_fail_out; } - return latt_fault; -} -/** - * lpfc_sli4_parse_latt_type - Parse sli4 link attention type - * @phba: pointer to lpfc hba data structure. - * @acqe_link: pointer to the async link completion queue entry. - * - * This routine is to parse the SLI4 link attention type and translate it - * into the base driver's link attention type coding. - * - * Return: Link attention type in terms of base driver's coding. - **/ -static uint8_t -lpfc_sli4_parse_latt_type(struct lpfc_hba *phba, - struct lpfc_acqe_link *acqe_link) -{ - uint8_t att_type; + /* Free memory allocated for mailbox command */ + mempool_free(pmb, phba->mbox_mem_pool); + return rc; - switch (bf_get(lpfc_acqe_link_status, acqe_link)) { - case LPFC_ASYNC_LINK_STATUS_DOWN: - case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN: - att_type = AT_LINK_DOWN; - break; - case LPFC_ASYNC_LINK_STATUS_UP: - /* Ignore physical link up events - wait for logical link up */ - att_type = AT_RESERVED; - break; - case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP: - att_type = AT_LINK_UP; - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0399 Invalid link attention type: x%x\n", - bf_get(lpfc_acqe_link_status, acqe_link)); - att_type = AT_RESERVED; - break; - } - return att_type; -} +mbx_fail_out: + /* Free memory allocated for mailbox command */ + mempool_free(pmb, phba->mbox_mem_pool); -/** - * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed - * @phba: pointer to lpfc hba data structure. - * @acqe_link: pointer to the async link completion queue entry. - * - * This routine is to parse the SLI4 link-attention link speed and translate - * it into the base driver's link-attention link speed coding. - * - * Return: Link-attention link speed in terms of base driver's coding. 
- **/ -static uint8_t -lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba, - struct lpfc_acqe_link *acqe_link) -{ - uint8_t link_speed; +mem_fail_out: + /* free the irq already requested */ + free_irq(phba->msix_entries[1].vector, phba); - switch (bf_get(lpfc_acqe_link_speed, acqe_link)) { - case LPFC_ASYNC_LINK_SPEED_ZERO: - link_speed = LA_UNKNW_LINK; - break; - case LPFC_ASYNC_LINK_SPEED_10MBPS: - link_speed = LA_UNKNW_LINK; - break; - case LPFC_ASYNC_LINK_SPEED_100MBPS: - link_speed = LA_UNKNW_LINK; - break; - case LPFC_ASYNC_LINK_SPEED_1GBPS: - link_speed = LA_1GHZ_LINK; - break; - case LPFC_ASYNC_LINK_SPEED_10GBPS: - link_speed = LA_10GHZ_LINK; - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0483 Invalid link-attention link speed: x%x\n", - bf_get(lpfc_acqe_link_speed, acqe_link)); - link_speed = LA_UNKNW_LINK; - break; - } - return link_speed; +irq_fail_out: + /* free the irq already requested */ + free_irq(phba->msix_entries[0].vector, phba); + +msi_fail_out: + /* Unconfigure MSI-X capability structure */ + pci_disable_msix(phba->pcidev); + return rc; } /** - * lpfc_sli4_async_link_evt - Process the asynchronous link event + * lpfc_disable_msix - Disable MSI-X interrupt mode * @phba: pointer to lpfc hba data structure. - * @acqe_link: pointer to the async link completion queue entry. * - * This routine is to handle the SLI4 asynchronous link event. + * This routine is invoked to release the MSI-X vectors and then disable the + * MSI-X interrupt mode. **/ static void -lpfc_sli4_async_link_evt(struct lpfc_hba *phba, - struct lpfc_acqe_link *acqe_link) +lpfc_disable_msix(struct lpfc_hba *phba) { - struct lpfc_dmabuf *mp; - LPFC_MBOXQ_t *pmb; - MAILBOX_t *mb; - READ_LA_VAR *la; - uint8_t att_type; - - att_type = lpfc_sli4_parse_latt_type(phba, acqe_link); - if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP) - return; - pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!pmb) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0395 The mboxq allocation failed\n"); - return; - } - mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); - if (!mp) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0396 The lpfc_dmabuf allocation failed\n"); - goto out_free_pmb; - } - mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); - if (!mp->virt) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0397 The mbuf allocation failed\n"); - goto out_free_dmabuf; - } - - /* Cleanup any outstanding ELS commands */ - lpfc_els_flush_all_cmd(phba); - - /* Block ELS IOCBs until we have done process link event */ - phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT; - - /* Update link event statistics */ - phba->sli.slistat.link_event++; - - /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */ - lpfc_read_la(phba, pmb, mp); - pmb->vport = phba->pport; - - /* Parse and translate status field */ - mb = &pmb->u.mb; - mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link); - - /* Parse and translate link attention fields */ - la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA; - la->eventTag = acqe_link->event_tag; - la->attType = att_type; - la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link); - - /* Fake the the following irrelvant fields */ - la->topology = TOPOLOGY_PT_PT; - la->granted_AL_PA = 0; - la->il = 0; - la->pb = 0; - la->fa = 0; - la->mm = 0; - - /* Keep the link status for extra SLI4 state machine reference */ - phba->sli4_hba.link_state.speed = - bf_get(lpfc_acqe_link_speed, acqe_link); - phba->sli4_hba.link_state.duplex = - 
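
For context: the removed lpfc_sli4_parse_latt_link_speed() above is a pure code translation, mapping ACQE speed codes into the base driver's LA_* values and folding everything sub-gigabit into "unknown". A sketch of the same mapping; enum demo_la_speed is an invented stand-in for the LA_* codes:

/* Illustration only; not part of the patch. */
#include <stdint.h>
#include <stdio.h>

/* Async-event speed codes from the header diff above. */
#define SPEED_ZERO	0x0
#define SPEED_10M	0x1
#define SPEED_100M	0x2
#define SPEED_1G	0x3
#define SPEED_10G	0x4

enum demo_la_speed { LA_UNKNOWN, LA_1GHZ, LA_10GHZ };

static enum demo_la_speed parse_link_speed(uint8_t acqe_speed)
{
	switch (acqe_speed) {
	case SPEED_1G:
		return LA_1GHZ;
	case SPEED_10G:
		return LA_10GHZ;
	case SPEED_ZERO:
	case SPEED_10M:
	case SPEED_100M:
	default:
		/* Sub-gigabit and unrecognized codes both map to unknown. */
		return LA_UNKNOWN;
	}
}

int main(void)
{
	printf("%d %d %d\n", parse_link_speed(SPEED_1G),
	       parse_link_speed(SPEED_10G), parse_link_speed(0x7));
	return 0;
}
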
bf_get(lpfc_acqe_link_duplex, acqe_link); - phba->sli4_hba.link_state.status = - bf_get(lpfc_acqe_link_status, acqe_link); - phba->sli4_hba.link_state.physical = - bf_get(lpfc_acqe_link_physical, acqe_link); - phba->sli4_hba.link_state.fault = - bf_get(lpfc_acqe_link_fault, acqe_link); - - /* Invoke the lpfc_handle_latt mailbox command callback function */ - lpfc_mbx_cmpl_read_la(phba, pmb); - - return; + int i; -out_free_dmabuf: - kfree(mp); -out_free_pmb: - mempool_free(pmb, phba->mbox_mem_pool); + /* Free up MSI-X multi-message vectors */ + for (i = 0; i < LPFC_MSIX_VECTORS; i++) + free_irq(phba->msix_entries[i].vector, phba); + /* Disable MSI-X */ + pci_disable_msix(phba->pcidev); } /** - * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event + * lpfc_enable_msi - Enable MSI interrupt mode * @phba: pointer to lpfc hba data structure. - * @acqe_link: pointer to the async fcoe completion queue entry. * - * This routine is to handle the SLI4 asynchronous fcoe event. - **/ -static void -lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, - struct lpfc_acqe_fcoe *acqe_fcoe) + * This routine is invoked to enable the MSI interrupt mode. The kernel + * function pci_enable_msi() is called to enable the MSI vector. The + * device driver is responsible for calling the request_irq() to register + * MSI vector with a interrupt the handler, which is done in this function. + * + * Return codes + * 0 - sucessful + * other values - error + */ +static int +lpfc_enable_msi(struct lpfc_hba *phba) { - uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe); int rc; - switch (event_type) { - case LPFC_FCOE_EVENT_TYPE_NEW_FCF: - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "2546 New FCF found index 0x%x tag 0x%x \n", - acqe_fcoe->fcf_index, - acqe_fcoe->event_tag); - /* - * If the current FCF is in discovered state, - * do nothing. - */ - spin_lock_irq(&phba->hbalock); - if (phba->fcf.fcf_flag & FCF_DISCOVERED) { - spin_unlock_irq(&phba->hbalock); - break; - } - spin_unlock_irq(&phba->hbalock); - - /* Read the FCF table and re-discover SAN. */ - rc = lpfc_sli4_read_fcf_record(phba, - LPFC_FCOE_FCF_GET_FIRST); - if (rc) - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "2547 Read FCF record failed 0x%x\n", - rc); - break; - - case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2548 FCF Table full count 0x%x tag 0x%x \n", - bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe), - acqe_fcoe->event_tag); - break; - - case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: - lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, - "2549 FCF disconnected fron network index 0x%x" - " tag 0x%x \n", acqe_fcoe->fcf_index, - acqe_fcoe->event_tag); - /* If the event is not for currently used fcf do nothing */ - if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index) - break; - /* - * Currently, driver support only one FCF - so treat this as - * a link down. - */ - lpfc_linkdown(phba); - /* Unregister FCF if no devices connected to it */ - lpfc_unregister_unused_fcf(phba); - break; - - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0288 Unknown FCoE event type 0x%x event tag " - "0x%x\n", event_type, acqe_fcoe->event_tag); - break; + rc = pci_enable_msi(phba->pcidev); + if (!rc) + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0462 PCI enable MSI mode success.\n"); + else { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0471 PCI enable MSI mode failed (%d)\n", rc); + return rc; } -} - -/** - * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event - * @phba: pointer to lpfc hba data structure. 
- * @acqe_link: pointer to the async dcbx completion queue entry. - * - * This routine is to handle the SLI4 asynchronous dcbx event. - **/ -static void -lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba, - struct lpfc_acqe_dcbx *acqe_dcbx) -{ - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0290 The SLI4 DCBX asynchronous event is not " - "handled yet\n"); -} - -/** - * lpfc_sli4_async_event_proc - Process all the pending asynchronous event - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked by the worker thread to process all the pending - * SLI4 asynchronous events. - **/ -void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) -{ - struct lpfc_cq_event *cq_event; - /* First, declare the async event has been handled */ - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~ASYNC_EVENT; - spin_unlock_irq(&phba->hbalock); - /* Now, handle all the async events */ - while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) { - /* Get the first event from the head of the event queue */ - spin_lock_irq(&phba->hbalock); - list_remove_head(&phba->sli4_hba.sp_asynce_work_queue, - cq_event, struct lpfc_cq_event, list); - spin_unlock_irq(&phba->hbalock); - /* Process the asynchronous event */ - switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) { - case LPFC_TRAILER_CODE_LINK: - lpfc_sli4_async_link_evt(phba, - &cq_event->cqe.acqe_link); - break; - case LPFC_TRAILER_CODE_FCOE: - lpfc_sli4_async_fcoe_evt(phba, - &cq_event->cqe.acqe_fcoe); - break; - case LPFC_TRAILER_CODE_DCBX: - lpfc_sli4_async_dcbx_evt(phba, - &cq_event->cqe.acqe_dcbx); - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "1804 Invalid asynchrous event code: " - "x%x\n", bf_get(lpfc_trailer_code, - &cq_event->cqe.mcqe_cmpl)); - break; - } - /* Free the completion event processed to the free pool */ - lpfc_sli4_cq_event_release(phba, cq_event); + rc = request_irq(phba->pcidev->irq, lpfc_intr_handler, + IRQF_SHARED, LPFC_DRIVER_NAME, phba); + if (rc) { + pci_disable_msi(phba->pcidev); + lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, + "0478 MSI request_irq failed (%d)\n", rc); } + return rc; }
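
With lpfc_enable_msi() now complete, the shape of the pattern is worth isolating. The sketch below is a minimal, generic rendering of it, not lpfc code; foo_setup_msi() and its parameters are hypothetical. The detail that matters is the unwind: a request_irq() failure must undo the earlier pci_enable_msi(), exactly as the function above does.

/* Assumes <linux/pci.h> and <linux/interrupt.h>. */
static int foo_setup_msi(struct pci_dev *pdev, irq_handler_t handler, void *ctx)
{
	int rc;

	rc = pci_enable_msi(pdev);
	if (rc)
		return rc;	/* MSI unavailable; caller falls back to INTx */

	rc = request_irq(pdev->irq, handler, IRQF_SHARED, "foo", ctx);
	if (rc)
		pci_disable_msi(pdev);	/* undo the enable on failure */
	return rc;
}
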
/** - * lpfc_api_table_setup - Set up per hba pci-device group func api jump table + * lpfc_disable_msi - Disable MSI interrupt mode * @phba: pointer to lpfc hba data structure. - * @dev_grp: The HBA PCI-Device group number. - * - * This routine is invoked to set up the per HBA PCI-Device group function - * API jump table entries. * - * Return: 0 if success, otherwise -ENODEV - **/ -int -lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) -{ - int rc; - - /* Set up lpfc PCI-device group */ - phba->pci_dev_grp = dev_grp; - - /* The LPFC_PCI_DEV_OC uses SLI4 */ - if (dev_grp == LPFC_PCI_DEV_OC) - phba->sli_rev = LPFC_SLI_REV4; - - /* Set up device INIT API function jump table */ - rc = lpfc_init_api_table_setup(phba, dev_grp); - if (rc) - return -ENODEV; - /* Set up SCSI API function jump table */ - rc = lpfc_scsi_api_table_setup(phba, dev_grp); - if (rc) - return -ENODEV; - /* Set up SLI API function jump table */ - rc = lpfc_sli_api_table_setup(phba, dev_grp); - if (rc) - return -ENODEV; - /* Set up MBOX API function jump table */ - rc = lpfc_mbox_api_table_setup(phba, dev_grp); - if (rc) - return -ENODEV; + * This routine is invoked to disable the MSI interrupt mode. The driver + * must call free_irq() on the MSI vector it registered with request_irq() + * before calling pci_disable_msi(); failing to do so results in a BUG_ON() + * and leaves the device with MSI enabled, leaking its vector. + */ - return 0; +static void +lpfc_disable_msi(struct lpfc_hba *phba) +{ + free_irq(phba->pcidev->irq, phba); + pci_disable_msi(phba->pcidev); + return; } /** @@ -3068,8 +2509,9 @@ lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) * * This routine is invoked to log the currently used active interrupt mode * to the device. - **/ -static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) + */ +static void +lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) { switch (intr_mode) { case 0: @@ -3092,4383 +2534,659 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) return; } -/** - * lpfc_enable_pci_dev - Enable a generic PCI device. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to enable the PCI device that is common to all - * PCI devices. - * - * Return codes - * 0 - sucessful - * other values - error - **/ -static int -lpfc_enable_pci_dev(struct lpfc_hba *phba) -{ - struct pci_dev *pdev; - int bars; - - /* Obtain PCI device reference */ - if (!phba->pcidev) - goto out_error; - else - pdev = phba->pcidev; - /* Select PCI BARs */ - bars = pci_select_bars(pdev, IORESOURCE_MEM); - /* Enable PCI device */ - if (pci_enable_device_mem(pdev)) - goto out_error; - /* Request PCI resource for the device */ - if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) - goto out_disable_device; - /* Set up device as PCI master and save state for EEH */ - pci_set_master(pdev); - pci_try_set_mwi(pdev); - pci_save_state(pdev); - - return 0; - -out_disable_device: - pci_disable_device(pdev); -out_error: - return -ENODEV; -} - -/** - * lpfc_disable_pci_dev - Disable a generic PCI device. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to disable the PCI device that is common to all - * PCI devices. - **/ static void -lpfc_disable_pci_dev +lpfc_stop_port(struct lpfc_hba *phba) { - struct pci_dev *pdev; - int bars; + /* Clear all interrupt enable conditions */ + writel(0, phba->HCregaddr); + readl(phba->HCregaddr); /* flush */ + /* Clear all pending interrupts */ + writel(0xffffffff, phba->HAregaddr); + readl(phba->HAregaddr); /* flush */ - /* Obtain PCI device reference */ - if (!phba->pcidev) - return; - else - pdev = phba->pcidev; - /* Select PCI BARs */ - bars = pci_select_bars(pdev, IORESOURCE_MEM); - /* Release PCI resource and disable PCI device */ - pci_release_selected_regions(pdev, bars); - pci_disable_device(pdev); - /* Null out PCI private reference to driver */ - pci_set_drvdata(pdev, NULL); + /* Reset some HBA SLI setup states */ + lpfc_stop_phba_timers(phba); + phba->pport->work_port_events = 0; return; }
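
lpfc_stop_port() above leans on a classic MMIO idiom: PCI writes are posted, so each writel() is followed by a readl() of the same register to force the write out to the device before the next step. A generic sketch with hypothetical foo names and register offsets:

/* Assumes <linux/io.h>; regs is an ioremap()ed register block. */
static void foo_quiesce_irqs(void __iomem *regs)
{
	writel(0, regs + 0x00);			/* mask all interrupt sources */
	readl(regs + 0x00);			/* flush the posted write */
	writel(0xffffffff, regs + 0x04);	/* ack anything still pending */
	readl(regs + 0x04);			/* flush */
}
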
/** - * lpfc_reset_hba - Reset a hba + * lpfc_enable_intr - Enable device interrupt * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to reset a hba device. It brings the HBA - * offline, performs a board restart, and then brings the board back - * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up - * on outstanding mailbox commands. + * This routine is invoked to enable device interrupts and associate the + * driver's interrupt handler(s) with interrupt vector(s). Depending on the + * interrupt mode configured for the driver, it will try to fall back from + * the configured interrupt mode to an interrupt mode which is supported by + * the platform, kernel, and device, in the order of: MSI-X -> MSI -> IRQ. + * + * Return codes + * 0 - successful + * other values - error **/ -void -lpfc_reset_hba(struct lpfc_hba *phba) +static uint32_t +lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) { - /* If resets are disabled then set error state and return. */ - if (!phba->cfg_enable_hba_reset) { - phba->link_state = LPFC_HBA_ERROR; - return; - } - lpfc_offline_prep(phba); - lpfc_offline(phba); - lpfc_sli_brdrestart(phba); - lpfc_online(phba); - lpfc_unblock_mgmt_io(phba); -} + uint32_t intr_mode = LPFC_INTR_ERROR; + int retval; -/** - * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to set up the driver internal resources specific to - * support the SLI-3 HBA device it attached to. - * - * Return codes - * 0 - sucessful - * other values - error - **/ -static int -lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) -{ - struct lpfc_sli *psli; - - /* - * Initialize timers used by driver - */ - - /* Heartbeat timer */ - init_timer(&phba->hb_tmofunc); - phba->hb_tmofunc.function = lpfc_hb_timeout; - phba->hb_tmofunc.data = (unsigned long)phba; - - psli = &phba->sli; - /* MBOX heartbeat timer */ - init_timer(&psli->mbox_tmo); - psli->mbox_tmo.function = lpfc_mbox_timeout; - psli->mbox_tmo.data = (unsigned long) phba; - /* FCP polling mode timer */ - init_timer(&phba->fcp_poll_timer); - phba->fcp_poll_timer.function = lpfc_poll_timeout; - phba->fcp_poll_timer.data = (unsigned long) phba; - /* Fabric block timer */ - init_timer(&phba->fabric_block_timer); - phba->fabric_block_timer.function = lpfc_fabric_block_timeout; - phba->fabric_block_timer.data = (unsigned long) phba; - /* EA polling mode timer */ - init_timer(&phba->eratt_poll); - phba->eratt_poll.function = lpfc_poll_eratt; - phba->eratt_poll.data = (unsigned long) phba; - - /* Host attention work mask setup */ - phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); - phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); - - /* Get all the module params for configuring this host */ - lpfc_get_cfgparam(phba); - /* - * Since the sg_tablesize is module parameter, the sg_dma_buf_size - * used to create the sg_dma_buf_pool must be dynamically calculated. - * 2 segments are added since the IOCB needs a command and response bde. - */ - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp) + - ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); - - if (phba->cfg_enable_bg) { - phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; - phba->cfg_sg_dma_buf_size += - phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); + if (cfg_mode == 2) { + /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */ + retval = lpfc_sli_config_port(phba, 3); + if (!retval) { + /* Now, try to enable MSI-X interrupt mode */ + retval = lpfc_enable_msix(phba); + if (!retval) { + /* Indicate initialization to MSI-X mode */ + phba->intr_type = MSIX; + intr_mode = 2; + } + } } - /* Also reinitialize the host templates with new values. */ - lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; - lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; - - phba->max_vpi = LPFC_MAX_VPI; - /* This will be set to correct value after config_port mbox */ - phba->max_vports = 0; - - /* - * Initialize the SLI Layer to run with lpfc HBAs. - */ - lpfc_sli_setup(phba); - lpfc_sli_queue_setup(phba); - - /* Allocate device driver memory */ - if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) - return -ENOMEM;
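
The removed SLI-3 setup above wires five timers through the same three-step dance. Isolated as a generic sketch (foo names are hypothetical; this is the init_timer() API of this kernel generation):

#include <linux/timer.h>

struct foo_hba {
	struct timer_list hb_timer;	/* hypothetical heartbeat timer */
};

static void foo_hb_timeout(unsigned long ptr)
{
	struct foo_hba *foo = (struct foo_hba *)ptr;

	/* ... kick heartbeat work, then re-arm one second out ... */
	mod_timer(&foo->hb_timer, jiffies + HZ);
}

static void foo_init_timers(struct foo_hba *foo)
{
	init_timer(&foo->hb_timer);
	foo->hb_timer.function = foo_hb_timeout;	/* the callback */
	foo->hb_timer.data = (unsigned long)foo;	/* context it receives */
}
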
+ /* Fallback to MSI if MSI-X initialization failed */ + if (cfg_mode >= 1 && phba->intr_type == NONE) { + retval = lpfc_enable_msi(phba); + if (!retval) { + /* Indicate initialization to MSI mode */ + phba->intr_type = MSI; + intr_mode = 1; + } + } - return 0; + /* Fallback to INTx if both MSI-X/MSI initialization failed */ + if (phba->intr_type == NONE) { + retval = request_irq(phba->pcidev->irq, lpfc_intr_handler, + IRQF_SHARED, LPFC_DRIVER_NAME, phba); + if (!retval) { + /* Indicate initialization to INTx mode */ + phba->intr_type = INTx; + intr_mode = 0; + } + } + return intr_mode; } /** - * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev + * lpfc_disable_intr - Disable device interrupt * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to unset the driver internal resources set up - * specific for supporting the SLI-3 HBA device it attached to. + * This routine is invoked to disable device interrupt and disassociate the + * driver's interrupt handler(s) from interrupt vector(s). Depending on the + * interrupt mode, the driver will release the interrupt vector(s) for the + * message signaled interrupt. **/ static void -lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) +lpfc_disable_intr(struct lpfc_hba *phba) { - /* Free device driver memory allocated */ - lpfc_mem_free_all(phba); + /* Disable the currently initialized interrupt mode */ + if (phba->intr_type == MSIX) + lpfc_disable_msix(phba); + else if (phba->intr_type == MSI) + lpfc_disable_msi(phba); + else if (phba->intr_type == INTx) + free_irq(phba->pcidev->irq, phba); + + /* Reset interrupt management states */ + phba->intr_type = NONE; + phba->sli.slistat.sli_intr = 0; return; } /** - * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev - * @phba: pointer to lpfc hba data structure. + * lpfc_pci_probe_one - lpfc PCI probe func to register device to PCI subsystem + * @pdev: pointer to PCI device + * @pid: pointer to PCI device identifier * - * This routine is invoked to set up the driver internal resources specific to - * support the SLI-4 HBA device it attached to. + * This routine is to be registered to the kernel's PCI subsystem. When an + * Emulex HBA is presented on the PCI bus, the kernel PCI subsystem looks at + * PCI device-specific information of the device and driver to see if the + * driver can support this kind of device. If the match is + * successful, the driver core invokes this routine. If this routine + * determines it can claim the HBA, it does all the initialization that it + * needs to do to handle the HBA properly.
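
Condensing lpfc_enable_intr() above to its skeleton (the foo names and FOO_* codes are hypothetical): try the richest mode the configuration permits and degrade one rung at a time, recording which rung finally worked so the caller can log it and a later resume can retry that mode first.

static uint32_t foo_enable_intr(struct foo_hba *foo, uint32_t cfg_mode)
{
	uint32_t mode = FOO_INTR_ERROR;		/* "nothing worked" sentinel */

	if (cfg_mode == 2 && !foo_enable_msix(foo))
		mode = 2;			/* MSI-X */
	if (mode == FOO_INTR_ERROR && cfg_mode >= 1 && !foo_enable_msi(foo))
		mode = 1;			/* MSI */
	if (mode == FOO_INTR_ERROR && !foo_enable_intx(foo))
		mode = 0;			/* legacy INTx */
	return mode;
}
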
* - * Return codes - * 0 - sucessful - * other values - error + * Return code + * 0 - driver can claim the device + * negative value - driver can not claim the device **/ -static int -lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) +static int __devinit +lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) { - struct lpfc_sli *psli; - int rc; - int i, hbq_count; + struct lpfc_vport *vport = NULL; + struct lpfc_hba *phba; + struct lpfc_sli *psli; + struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; + struct Scsi_Host *shost = NULL; + void *ptr; + unsigned long bar0map_len, bar2map_len; + int error = -ENODEV, retval; + int i, hbq_count; + uint16_t iotag; + uint32_t cfg_mode, intr_mode; + int bars = pci_select_bars(pdev, IORESOURCE_MEM); + struct lpfc_adapter_event_header adapter_event; - /* Before proceed, wait for POST done and device ready */ - rc = lpfc_sli4_post_status_check(phba); - if (rc) - return -ENODEV; + if (pci_enable_device_mem(pdev)) + goto out; + if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME)) + goto out_disable_device; + + phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL); + if (!phba) + goto out_release_regions; + + atomic_set(&phba->fast_event_count, 0); + spin_lock_init(&phba->hbalock); + + /* Initialize ndlp management spinlock */ + spin_lock_init(&phba->ndlp_lock); + + phba->pcidev = pdev; + + /* Assign an unused board number */ + if ((phba->brd_no = lpfc_get_instance()) < 0) + goto out_free_phba; + INIT_LIST_HEAD(&phba->port_list); + init_waitqueue_head(&phba->wait_4_mlo_m_q); /* - * Initialize timers used by driver + * Get all the module params for configuring this host and then + * establish the host. */ + lpfc_get_cfgparam(phba); + phba->max_vpi = LPFC_MAX_VPI; - /* Heartbeat timer */ + /* Initialize timers used by driver */ init_timer(&phba->hb_tmofunc); phba->hb_tmofunc.function = lpfc_hb_timeout; phba->hb_tmofunc.data = (unsigned long)phba; psli = &phba->sli; - /* MBOX heartbeat timer */ init_timer(&psli->mbox_tmo); psli->mbox_tmo.function = lpfc_mbox_timeout; psli->mbox_tmo.data = (unsigned long) phba; - /* Fabric block timer */ + init_timer(&phba->fcp_poll_timer); + phba->fcp_poll_timer.function = lpfc_poll_timeout; + phba->fcp_poll_timer.data = (unsigned long) phba; init_timer(&phba->fabric_block_timer); phba->fabric_block_timer.function = lpfc_fabric_block_timeout; phba->fabric_block_timer.data = (unsigned long) phba; - /* EA polling mode timer */ init_timer(&phba->eratt_poll); phba->eratt_poll.function = lpfc_poll_eratt; phba->eratt_poll.data = (unsigned long) phba; - /* - * We need to do a READ_CONFIG mailbox command here before - * calling lpfc_get_cfgparam. For VFs this will report the - * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. - * All of the resources allocated - * for this Port are tied to these values. - */ - /* Get all the module params for configuring this host */ - lpfc_get_cfgparam(phba); - phba->max_vpi = LPFC_MAX_VPI; - /* This will be set to correct value after the read_config mbox */ - phba->max_vports = 0; - - /* Program the default value of vlan_id and fc_map */ - phba->valid_vlan = 0; - phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; - phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; - phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; - /* - * Since the sg_tablesize is module parameter, the sg_dma_buf_size - * used to create the sg_dma_buf_pool must be dynamically calculated. - * 2 segments are added since the IOCB needs a command and response bde. 
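
The sizing comment just above compresses a simple formula, shown here as a hypothetical helper. Using the byte counts the driver's own SLI-4 table (below) cites for illustration (32-byte command, 160-byte response, 16-byte entries), 50 data segments plus the two command/response entries land exactly on a 1K buffer: 32 + 160 + (50 + 2) * 16 = 1024.

static size_t foo_sg_dma_buf_size(size_t cmd_sz, size_t rsp_sz,
				  size_t entry_sz, unsigned int seg_cnt)
{
	/* one entry per data segment, plus two for command and response */
	return cmd_sz + rsp_sz + (seg_cnt + 2) * entry_sz;
}
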
- * To insure that the scsi sgl does not cross a 4k page boundary only - * sgl sizes of 1k, 2k, 4k, and 8k are supported. - * Table of sgl sizes and seg_cnt: - * sgl size, sg_seg_cnt total seg - * 1k 50 52 - * 2k 114 116 - * 4k 242 244 - * 8k 498 500 - * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024 - * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048 - * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096 - * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192 - */ - if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT) - phba->cfg_sg_seg_cnt = 50; - else if (phba->cfg_sg_seg_cnt <= 114) - phba->cfg_sg_seg_cnt = 114; - else if (phba->cfg_sg_seg_cnt <= 242) - phba->cfg_sg_seg_cnt = 242; - else - phba->cfg_sg_seg_cnt = 498; - - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) - + sizeof(struct fcp_rsp); - phba->cfg_sg_dma_buf_size += - ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); + pci_set_master(pdev); + pci_save_state(pdev); + pci_try_set_mwi(pdev); - /* Initialize buffer queue management fields */ - hbq_count = lpfc_sli_hbq_count(); - for (i = 0; i < hbq_count; ++i) - INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); - INIT_LIST_HEAD(&phba->rb_pend_list); - phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; - phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free; + if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0) + if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(32)) != 0) + goto out_idr_remove; /* - * Initialize the SLI Layer to run with lpfc SLI4 HBAs. + * Get the bus address of Bar0 and Bar2 and the number of bytes + * required by each mapping. */ - /* Initialize the Abort scsi buffer list used by driver */ - spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); - /* This abort list used by worker thread */ - spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock); + phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0); + bar0map_len = pci_resource_len(phba->pcidev, 0); - /* - * Initialize dirver internal slow-path work queues - */ + phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2); + bar2map_len = pci_resource_len(phba->pcidev, 2); - /* Driver internel slow-path CQ Event pool */ - INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool); - /* Response IOCB work queue list */ - INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue); - /* Asynchronous event CQ Event work queue list */ - INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue); - /* Fast-path XRI aborted CQ Event work queue list */ - INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue); - /* Slow-path XRI aborted CQ Event work queue list */ - INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue); - /* Receive queue CQ Event work queue list */ - INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue); - - /* Initialize the driver internal SLI layer lists. */ - lpfc_sli_setup(phba); - lpfc_sli_queue_setup(phba); + /* Map HBA SLIM to a kernel virtual address. */ + phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); + if (!phba->slim_memmap_p) { + error = -ENODEV; + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for SLIM memory.\n"); + goto out_idr_remove; + } - /* Allocate device driver memory */ - rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ); - if (rc) - return -ENOMEM; + /* Map HBA Control Registers to a kernel virtual address. 
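
The probe path here maps BAR0 (SLIM) and BAR2 (control registers) with the same three calls each time. As a generic helper (foo_map_bar() is hypothetical; real code must iounmap() on teardown and unwind earlier mappings on failure, as the goto chain above does):

static void __iomem *foo_map_bar(struct pci_dev *pdev, int bar)
{
	resource_size_t start = pci_resource_start(pdev, bar);
	resource_size_t len = pci_resource_len(pdev, bar);

	if (!start || !len)
		return NULL;		/* BAR not implemented on this device */
	return ioremap(start, len);	/* NULL on failure; caller unwinds */
}
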
*/ + phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); + if (!phba->ctrl_regs_memmap_p) { + error = -ENODEV; + dev_printk(KERN_ERR, &pdev->dev, + "ioremap failed for HBA control registers.\n"); + goto out_iounmap_slim; + } - /* Create the bootstrap mailbox command */ - rc = lpfc_create_bootstrap_mbox(phba); - if (unlikely(rc)) - goto out_free_mem; + /* Allocate memory for SLI-2 structures */ + phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev, + SLI2_SLIM_SIZE, + &phba->slim2p.phys, + GFP_KERNEL); + if (!phba->slim2p.virt) + goto out_iounmap; - /* Set up the host's endian order with the device. */ - rc = lpfc_setup_endian_order(phba); - if (unlikely(rc)) - goto out_free_bsmbx; + memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); + phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); + phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); + phba->IOCBs = (phba->slim2p.virt + + offsetof(struct lpfc_sli2_slim, IOCBs)); - /* Set up the hba's configuration parameters. */ - rc = lpfc_sli4_read_config(phba); - if (unlikely(rc)) - goto out_free_bsmbx; + phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev, + lpfc_sli_hbq_size(), + &phba->hbqslimp.phys, + GFP_KERNEL); + if (!phba->hbqslimp.virt) + goto out_free_slim; - /* Perform a function reset */ - rc = lpfc_pci_function_reset(phba); - if (unlikely(rc)) - goto out_free_bsmbx; + hbq_count = lpfc_sli_hbq_count(); + ptr = phba->hbqslimp.virt; + for (i = 0; i < hbq_count; ++i) { + phba->hbqs[i].hbq_virt = ptr; + INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); + ptr += (lpfc_hbq_defs[i]->entry_count * + sizeof(struct lpfc_hbq_entry)); + } + phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; + phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; - /* Create all the SLI4 queues */ - rc = lpfc_sli4_queue_create(phba); - if (rc) - goto out_free_bsmbx; + memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); - /* Create driver internal CQE event pool */ - rc = lpfc_sli4_cq_event_pool_create(phba); - if (rc) - goto out_destroy_queue; + INIT_LIST_HEAD(&phba->hbqbuf_in_list); - /* Initialize and populate the iocb list per host */ - rc = lpfc_init_sgl_list(phba); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1400 Failed to initialize sgl list.\n"); - goto out_destroy_cq_event_pool; - } - rc = lpfc_init_active_sgl_array(phba); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1430 Failed to initialize sgl list.\n"); - goto out_free_sgl_list; - } + /* Initialize the SLI Layer to run with lpfc HBAs. */ + lpfc_sli_setup(phba); + lpfc_sli_queue_setup(phba); - rc = lpfc_sli4_init_rpi_hdrs(phba); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1432 Failed to initialize rpi headers.\n"); - goto out_free_active_sgl; + retval = lpfc_mem_alloc(phba); + if (retval) { + error = retval; + goto out_free_hbqslimp; } - phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * - phba->cfg_fcp_eq_count), GFP_KERNEL); - if (!phba->sli4_hba.fcp_eq_hdl) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2572 Failed allocate memory for fast-path " - "per-EQ handle array\n"); - goto out_remove_rpi_hdrs; - } + /* Initialize and populate the iocb list per host. */ + INIT_LIST_HEAD(&phba->lpfc_iocb_list); + for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) { + iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); + if (iocbq_entry == NULL) { + printk(KERN_ERR "%s: only allocated %d iocbs of " + "expected %d count. 
Unloading driver.\n", + __func__, i, LPFC_IOCB_LIST_CNT); + error = -ENOMEM; + goto out_free_iocbq; + } - phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * - phba->sli4_hba.cfg_eqn), GFP_KERNEL); - if (!phba->sli4_hba.msix_entries) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2573 Failed allocate memory for msi-x " - "interrupt vector entries\n"); - goto out_free_fcp_eq_hdl; + iotag = lpfc_sli_next_iotag(phba, iocbq_entry); + if (iotag == 0) { + kfree (iocbq_entry); + printk(KERN_ERR "%s: failed to allocate IOTAG. " + "Unloading driver.\n", + __func__); + error = -ENOMEM; + goto out_free_iocbq; + } + + spin_lock_irq(&phba->hbalock); + list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); + phba->total_iocbq_bufs++; + spin_unlock_irq(&phba->hbalock); } - return rc; + /* Initialize HBA structure */ + phba->fc_edtov = FF_DEF_EDTOV; + phba->fc_ratov = FF_DEF_RATOV; + phba->fc_altov = FF_DEF_ALTOV; + phba->fc_arbtov = FF_DEF_ARBTOV; -out_free_fcp_eq_hdl: - kfree(phba->sli4_hba.fcp_eq_hdl); -out_remove_rpi_hdrs: - lpfc_sli4_remove_rpi_hdrs(phba); -out_free_active_sgl: - lpfc_free_active_sgl(phba); -out_free_sgl_list: - lpfc_free_sgl_list(phba); -out_destroy_cq_event_pool: - lpfc_sli4_cq_event_pool_destroy(phba); -out_destroy_queue: - lpfc_sli4_queue_destroy(phba); -out_free_bsmbx: - lpfc_destroy_bootstrap_mbox(phba); -out_free_mem: - lpfc_mem_free(phba); - return rc; -} + INIT_LIST_HEAD(&phba->work_list); + phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); + phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4)); -/** - * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to unset the driver internal resources set up - * specific for supporting the SLI-4 HBA device it attached to. - **/ -static void -lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) -{ - struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; + /* Initialize the wait queue head for the kernel thread */ + init_waitqueue_head(&phba->work_waitq); - /* unregister default FCFI from the HBA */ - lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); + /* Startup the kernel thread for this host adapter. */ + phba->worker_thread = kthread_run(lpfc_do_work, phba, + "lpfc_worker_%d", phba->brd_no); + if (IS_ERR(phba->worker_thread)) { + error = PTR_ERR(phba->worker_thread); + goto out_free_iocbq; + } - /* Free the default FCR table */ - lpfc_sli_remove_dflt_fcf(phba); + /* Initialize the list of scsi buffers used by driver for scsi IO. */ + spin_lock_init(&phba->scsi_buf_list_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); - /* Free memory allocated for msi-x interrupt vector entries */ - kfree(phba->sli4_hba.msix_entries); + /* Initialize list of fabric iocbs */ + INIT_LIST_HEAD(&phba->fabric_iocb_list); - /* Free memory allocated for fast-path work queue handles */ - kfree(phba->sli4_hba.fcp_eq_hdl); + /* Initialize list to save ELS buffers */ + INIT_LIST_HEAD(&phba->elsbuf); - /* Free the allocated rpi headers. 
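
The worker-thread bring-up above has one trap worth isolating: kthread_run() reports failure through ERR_PTR() codes, not NULL. A minimal sketch with hypothetical foo names; the matching teardown is a single kthread_stop():

static int foo_do_work(void *arg)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();	/* real code sleeps on the hba's waitqueue */
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static int foo_start_worker(struct foo_hba *foo, int instance)
{
	foo->worker = kthread_run(foo_do_work, foo, "foo_worker_%d", instance);
	if (IS_ERR(foo->worker))
		return PTR_ERR(foo->worker);	/* not a NULL check! */
	return 0;
}
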
*/ - lpfc_sli4_remove_rpi_hdrs(phba); + vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); + if (!vport) + goto out_kthread_stop; - /* Free the ELS sgl list */ - lpfc_free_active_sgl(phba); - lpfc_free_sgl_list(phba); - - /* Free the SCSI sgl management array */ - kfree(phba->sli4_hba.lpfc_scsi_psb_array); - - /* Free the SLI4 queues */ - lpfc_sli4_queue_destroy(phba); - - /* Free the completion queue EQ event pool */ - lpfc_sli4_cq_event_release_all(phba); - lpfc_sli4_cq_event_pool_destroy(phba); - - /* Reset SLI4 HBA FCoE function */ - lpfc_pci_function_reset(phba); - - /* Free the bsmbx region. */ - lpfc_destroy_bootstrap_mbox(phba); - - /* Free the SLI Layer memory with SLI4 HBAs */ - lpfc_mem_free_all(phba); - - /* Free the current connect table */ - list_for_each_entry_safe(conn_entry, next_conn_entry, - &phba->fcf_conn_rec_list, list) - kfree(conn_entry); - - return; -} - -/** - * lpfc_init_api_table_setup - Set up init api fucntion jump table - * @phba: The hba struct for which this call is being executed. - * @dev_grp: The HBA PCI-Device group number. - * - * This routine sets up the device INIT interface API function jump table - * in @phba struct. - * - * Returns: 0 - success, -ENODEV - failure. - **/ -int -lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) -{ - switch (dev_grp) { - case LPFC_PCI_DEV_LP: - phba->lpfc_hba_down_post = lpfc_hba_down_post_s3; - phba->lpfc_handle_eratt = lpfc_handle_eratt_s3; - phba->lpfc_stop_port = lpfc_stop_port_s3; - break; - case LPFC_PCI_DEV_OC: - phba->lpfc_hba_down_post = lpfc_hba_down_post_s4; - phba->lpfc_handle_eratt = lpfc_handle_eratt_s4; - phba->lpfc_stop_port = lpfc_stop_port_s4; - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1431 Invalid HBA PCI-device group: 0x%x\n", - dev_grp); - return -ENODEV; - break; - } - return 0; -} - -/** - * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to set up the driver internal resources before the - * device specific resource setup to support the HBA device it attached to. - * - * Return codes - * 0 - sucessful - * other values - error - **/ -static int -lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) -{ - /* - * Driver resources common to all SLI revisions - */ - atomic_set(&phba->fast_event_count, 0); - spin_lock_init(&phba->hbalock); - - /* Initialize ndlp management spinlock */ - spin_lock_init(&phba->ndlp_lock); - - INIT_LIST_HEAD(&phba->port_list); - INIT_LIST_HEAD(&phba->work_list); - init_waitqueue_head(&phba->wait_4_mlo_m_q); - - /* Initialize the wait queue head for the kernel thread */ - init_waitqueue_head(&phba->work_waitq); - - /* Initialize the scsi buffer list used by driver for scsi IO */ - spin_lock_init(&phba->scsi_buf_list_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); - - /* Initialize the fabric iocb list */ - INIT_LIST_HEAD(&phba->fabric_iocb_list); - - /* Initialize list to save ELS buffers */ - INIT_LIST_HEAD(&phba->elsbuf); - - /* Initialize FCF connection rec list */ - INIT_LIST_HEAD(&phba->fcf_conn_rec_list); - - return 0; -} - -/** - * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to set up the driver internal resources after the - * device specific resource setup to support the HBA device it attached to. 
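
The removed lpfc_init_api_table_setup() above follows a pattern all of the *_api_table_setup() routines share: pick a set of function pointers once, at probe time, keyed by the PCI device group, so the hot paths never branch on SLI revision. A reduced sketch with hypothetical foo names:

static int foo_api_table_setup(struct foo_hba *foo, u8 dev_grp)
{
	switch (dev_grp) {
	case FOO_DEV_SLI3:
		foo->stop_port = foo_stop_port_s3;
		foo->handle_eratt = foo_handle_eratt_s3;
		break;
	case FOO_DEV_SLI4:
		foo->stop_port = foo_stop_port_s4;
		foo->handle_eratt = foo_handle_eratt_s4;
		break;
	default:
		return -ENODEV;		/* unknown device group */
	}
	return 0;			/* callers just use foo->stop_port(foo) */
}
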
- * - * Return codes - * 0 - sucessful - * other values - error - **/ -static int -lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) -{ - int error; - - /* Startup the kernel thread for this host adapter. */ - phba->worker_thread = kthread_run(lpfc_do_work, phba, - "lpfc_worker_%d", phba->brd_no); - if (IS_ERR(phba->worker_thread)) { - error = PTR_ERR(phba->worker_thread); - return error; - } - - return 0; -} - -/** - * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to unset the driver internal resources set up after - * the device specific resource setup for supporting the HBA device it - * attached to. - **/ -static void -lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba) -{ - /* Stop kernel worker thread */ - kthread_stop(phba->worker_thread); -} - -/** - * lpfc_free_iocb_list - Free iocb list. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to free the driver's IOCB list and memory. - **/ -static void -lpfc_free_iocb_list(struct lpfc_hba *phba) -{ - struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL; - - spin_lock_irq(&phba->hbalock); - list_for_each_entry_safe(iocbq_entry, iocbq_next, - &phba->lpfc_iocb_list, list) { - list_del(&iocbq_entry->list); - kfree(iocbq_entry); - phba->total_iocbq_bufs--; - } - spin_unlock_irq(&phba->hbalock); - - return; -} - -/** - * lpfc_init_iocb_list - Allocate and initialize iocb list. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to allocate and initizlize the driver's IOCB - * list and set up the IOCB tag array accordingly. - * - * Return codes - * 0 - sucessful - * other values - error - **/ -static int -lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) -{ - struct lpfc_iocbq *iocbq_entry = NULL; - uint16_t iotag; - int i; - - /* Initialize and populate the iocb list per host. */ - INIT_LIST_HEAD(&phba->lpfc_iocb_list); - for (i = 0; i < iocb_count; i++) { - iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL); - if (iocbq_entry == NULL) { - printk(KERN_ERR "%s: only allocated %d iocbs of " - "expected %d count. Unloading driver.\n", - __func__, i, LPFC_IOCB_LIST_CNT); - goto out_free_iocbq; - } - - iotag = lpfc_sli_next_iotag(phba, iocbq_entry); - if (iotag == 0) { - kfree(iocbq_entry); - printk(KERN_ERR "%s: failed to allocate IOTAG. " - "Unloading driver.\n", __func__); - goto out_free_iocbq; - } - iocbq_entry->sli4_xritag = NO_XRI; - - spin_lock_irq(&phba->hbalock); - list_add(&iocbq_entry->list, &phba->lpfc_iocb_list); - phba->total_iocbq_bufs++; - spin_unlock_irq(&phba->hbalock); - } - - return 0; - -out_free_iocbq: - lpfc_free_iocb_list(phba); - - return -ENOMEM; -} - -/** - * lpfc_free_sgl_list - Free sgl list. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to free the driver's sgl list and memory. 
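
Both of the removed free routines here use the same drain idiom, worth spelling out once with hypothetical foo names: list_for_each_entry_safe() is mandatory because each node is unlinked and freed mid-walk, and holding the lock across the walk keeps the counter consistent with the list.

static void foo_free_cmd_list(struct foo_hba *foo)
{
	struct foo_cmd *cmd, *next;

	spin_lock_irq(&foo->lock);
	list_for_each_entry_safe(cmd, next, &foo->cmd_list, list) {
		list_del(&cmd->list);	/* unlink before freeing */
		kfree(cmd);
		foo->total_cmds--;
	}
	spin_unlock_irq(&foo->lock);
}
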
- **/ -static void -lpfc_free_sgl_list(struct lpfc_hba *phba) -{ - struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL; - LIST_HEAD(sglq_list); - int rc = 0; - - spin_lock_irq(&phba->hbalock); - list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); - spin_unlock_irq(&phba->hbalock); - - list_for_each_entry_safe(sglq_entry, sglq_next, - &sglq_list, list) { - list_del(&sglq_entry->list); - lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys); - kfree(sglq_entry); - phba->sli4_hba.total_sglq_bufs--; - } - rc = lpfc_sli4_remove_all_sgl_pages(phba); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2005 Unable to deregister pages from HBA: %x", rc); - } - kfree(phba->sli4_hba.lpfc_els_sgl_array); -} - -/** - * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to allocate the driver's active sgl memory. - * This array will hold the sglq_entry's for active IOs. - **/ -static int -lpfc_init_active_sgl_array(struct lpfc_hba *phba) -{ - int size; - size = sizeof(struct lpfc_sglq *); - size *= phba->sli4_hba.max_cfg_param.max_xri; - - phba->sli4_hba.lpfc_sglq_active_list = - kzalloc(size, GFP_KERNEL); - if (!phba->sli4_hba.lpfc_sglq_active_list) - return -ENOMEM; - return 0; -} - -/** - * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to walk through the array of active sglq entries - * and free all of the resources. - * This is just a place holder for now. - **/ -static void -lpfc_free_active_sgl(struct lpfc_hba *phba) -{ - kfree(phba->sli4_hba.lpfc_sglq_active_list); -} - -/** - * lpfc_init_sgl_list - Allocate and initialize sgl list. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to allocate and initizlize the driver's sgl - * list and set up the sgl xritag tag array accordingly. - * - * Return codes - * 0 - sucessful - * other values - error - **/ -static int -lpfc_init_sgl_list(struct lpfc_hba *phba) -{ - struct lpfc_sglq *sglq_entry = NULL; - int i; - int els_xri_cnt; - - els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "2400 lpfc_init_sgl_list els %d.\n", - els_xri_cnt); - /* Initialize and populate the sglq list per host/VF. 
*/ - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list); - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list); - - /* Sanity check on XRI management */ - if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2562 No room left for SCSI XRI allocation: " - "max_xri=%d, els_xri=%d\n", - phba->sli4_hba.max_cfg_param.max_xri, - els_xri_cnt); - return -ENOMEM; - } - - /* Allocate memory for the ELS XRI management array */ - phba->sli4_hba.lpfc_els_sgl_array = - kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt), - GFP_KERNEL); - - if (!phba->sli4_hba.lpfc_els_sgl_array) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2401 Failed to allocate memory for ELS " - "XRI management array of size %d.\n", - els_xri_cnt); - return -ENOMEM; - } - - /* Keep the SCSI XRI into the XRI management array */ - phba->sli4_hba.scsi_xri_max = - phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt; - phba->sli4_hba.scsi_xri_cnt = 0; - - phba->sli4_hba.lpfc_scsi_psb_array = - kzalloc((sizeof(struct lpfc_scsi_buf *) * - phba->sli4_hba.scsi_xri_max), GFP_KERNEL); - - if (!phba->sli4_hba.lpfc_scsi_psb_array) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2563 Failed to allocate memory for SCSI " - "XRI management array of size %d.\n", - phba->sli4_hba.scsi_xri_max); - kfree(phba->sli4_hba.lpfc_els_sgl_array); - return -ENOMEM; - } - - for (i = 0; i < els_xri_cnt; i++) { - sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL); - if (sglq_entry == NULL) { - printk(KERN_ERR "%s: only allocated %d sgls of " - "expected %d count. Unloading driver.\n", - __func__, i, els_xri_cnt); - goto out_free_mem; - } - - sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba); - if (sglq_entry->sli4_xritag == NO_XRI) { - kfree(sglq_entry); - printk(KERN_ERR "%s: failed to allocate XRI.\n" - "Unloading driver.\n", __func__); - goto out_free_mem; - } - sglq_entry->buff_type = GEN_BUFF_TYPE; - sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys); - if (sglq_entry->virt == NULL) { - kfree(sglq_entry); - printk(KERN_ERR "%s: failed to allocate mbuf.\n" - "Unloading driver.\n", __func__); - goto out_free_mem; - } - sglq_entry->sgl = sglq_entry->virt; - memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE); - - /* The list order is used by later block SGL registraton */ - spin_lock_irq(&phba->hbalock); - list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list); - phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry; - phba->sli4_hba.total_sglq_bufs++; - spin_unlock_irq(&phba->hbalock); - } - return 0; - -out_free_mem: - kfree(phba->sli4_hba.lpfc_scsi_psb_array); - lpfc_free_sgl_list(phba); - return -ENOMEM; -} - -/** - * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to post rpi header templates to the - * HBA consistent with the SLI-4 interface spec. This routine - * posts a PAGE_SIZE memory region to the port to hold up to - * PAGE_SIZE modulo 64 rpi context headers. - * No locks are held here because this is an initialization routine - * called only from probe or lpfc_online when interrupts are not - * enabled and the driver is reinitializing the device. - * - * Return codes - * 0 - sucessful - * ENOMEM - No availble memory - * EIO - The mailbox failed to complete successfully. 
- **/ -int -lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba) -{ - int rc = 0; - int longs; - uint16_t rpi_count; - struct lpfc_rpi_hdr *rpi_hdr; - - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list); - - /* - * Provision an rpi bitmask range for discovery. The total count - * is the difference between max and base + 1. - */ - rpi_count = phba->sli4_hba.max_cfg_param.rpi_base + - phba->sli4_hba.max_cfg_param.max_rpi - 1; - - longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG; - phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long), - GFP_KERNEL); - if (!phba->sli4_hba.rpi_bmask) - return -ENOMEM; - - rpi_hdr = lpfc_sli4_create_rpi_hdr(phba); - if (!rpi_hdr) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0391 Error during rpi post operation\n"); - lpfc_sli4_remove_rpis(phba); - rc = -ENODEV; - } - - return rc; -} - -/** - * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to allocate a single 4KB memory region to - * support rpis and stores them in the phba. This single region - * provides support for up to 64 rpis. The region is used globally - * by the device. - * - * Returns: - * A valid rpi hdr on success. - * A NULL pointer on any failure. - **/ -struct lpfc_rpi_hdr * -lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba) -{ - uint16_t rpi_limit, curr_rpi_range; - struct lpfc_dmabuf *dmabuf; - struct lpfc_rpi_hdr *rpi_hdr; - - rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base + - phba->sli4_hba.max_cfg_param.max_rpi - 1; - - spin_lock_irq(&phba->hbalock); - curr_rpi_range = phba->sli4_hba.next_rpi; - spin_unlock_irq(&phba->hbalock); - - /* - * The port has a limited number of rpis. The increment here - * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value - * and to allow the full max_rpi range per port. - */ - if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit) - return NULL; - - /* - * First allocate the protocol header region for the port. The - * port expects a 4KB DMA-mapped memory region that is 4K aligned. - */ - dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); - if (!dmabuf) - return NULL; - - dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, - LPFC_HDR_TEMPLATE_SIZE, - &dmabuf->phys, - GFP_KERNEL); - if (!dmabuf->virt) { - rpi_hdr = NULL; - goto err_free_dmabuf; - } - - memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE); - if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { - rpi_hdr = NULL; - goto err_free_coherent; - } - - /* Save the rpi header data for cleanup later. */ - rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL); - if (!rpi_hdr) - goto err_free_coherent; - - rpi_hdr->dmabuf = dmabuf; - rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE; - rpi_hdr->page_count = 1; - spin_lock_irq(&phba->hbalock); - rpi_hdr->start_rpi = phba->sli4_hba.next_rpi; - list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list); - - /* - * The next_rpi stores the next module-64 rpi value to post - * in any subsequent rpi memory region postings. - */ - phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT; - spin_unlock_irq(&phba->hbalock); - return rpi_hdr; - - err_free_coherent: - dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE, - dmabuf->virt, dmabuf->phys); - err_free_dmabuf: - kfree(dmabuf); - return NULL; -} - -/** - * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to remove all memory resources allocated - * to support rpis. 
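
The rpi bitmask allocation above rounds a bit count up to whole longs before allocating. As a standalone helper (hypothetical foo name): 840 ids on a 64-bit host need (840 + 63) / 64 = 14 longs, i.e. a 112-byte bitmap.

static unsigned long *foo_alloc_id_bitmap(unsigned int id_count)
{
	size_t longs = (id_count + BITS_PER_LONG - 1) / BITS_PER_LONG;

	/* zeroed, so every id starts out "free" */
	return kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
}
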
This routine presumes the caller has released all - * rpis consumed by fabric or port logins and is prepared to have - * the header pages removed. - **/ -void -lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba) -{ - struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr; - - list_for_each_entry_safe(rpi_hdr, next_rpi_hdr, - &phba->sli4_hba.lpfc_rpi_hdr_list, list) { - list_del(&rpi_hdr->list); - dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len, - rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys); - kfree(rpi_hdr->dmabuf); - kfree(rpi_hdr); - } - - phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; - memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask)); -} - -/** - * lpfc_hba_alloc - Allocate driver hba data structure for a device. - * @pdev: pointer to pci device data structure. - * - * This routine is invoked to allocate the driver hba data structure for an - * HBA device. If the allocation is successful, the phba reference to the - * PCI device data structure is set. - * - * Return codes - * pointer to @phba - sucessful - * NULL - error - **/ -static struct lpfc_hba * -lpfc_hba_alloc(struct pci_dev *pdev) -{ - struct lpfc_hba *phba; - - /* Allocate memory for HBA structure */ - phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL); - if (!phba) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1417 Failed to allocate hba struct.\n"); - return NULL; - } - - /* Set reference to PCI device in HBA structure */ - phba->pcidev = pdev; - - /* Assign an unused board number */ - phba->brd_no = lpfc_get_instance(); - if (phba->brd_no < 0) { - kfree(phba); - return NULL; - } - - return phba; -} - -/** - * lpfc_hba_free - Free driver hba data structure with a device. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to free the driver hba data structure with an - * HBA device. - **/ -static void -lpfc_hba_free(struct lpfc_hba *phba) -{ - /* Release the driver assigned board number */ - idr_remove(&lpfc_hba_index, phba->brd_no); - - kfree(phba); - return; -} - -/** - * lpfc_create_shost - Create hba physical port with associated scsi host. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to create HBA physical port and associate a SCSI - * host with it. - * - * Return codes - * 0 - sucessful - * other values - error - **/ -static int -lpfc_create_shost(struct lpfc_hba *phba) -{ - struct lpfc_vport *vport; - struct Scsi_Host *shost; - - /* Initialize HBA FC structure */ - phba->fc_edtov = FF_DEF_EDTOV; - phba->fc_ratov = FF_DEF_RATOV; - phba->fc_altov = FF_DEF_ALTOV; - phba->fc_arbtov = FF_DEF_ARBTOV; - - vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev); - if (!vport) - return -ENODEV; - - shost = lpfc_shost_from_vport(vport); - phba->pport = vport; - lpfc_debugfs_initialize(vport); - /* Put reference to SCSI host to driver's device private data */ - pci_set_drvdata(phba->pcidev, shost); - - return 0; -} - -/** - * lpfc_destroy_shost - Destroy hba physical port with associated scsi host. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to destroy HBA physical port and the associated - * SCSI host. - **/ -static void -lpfc_destroy_shost(struct lpfc_hba *phba) -{ - struct lpfc_vport *vport = phba->pport; - - /* Destroy physical port that associated with the SCSI host */ - destroy_port(vport); - - return; -} - -/** - * lpfc_setup_bg - Setup Block guard structures and debug areas. - * @phba: pointer to lpfc hba data structure. 
- * @shost: the shost to be used to detect Block guard settings. - * - * This routine sets up the local Block guard protocol settings for @shost. - * This routine also allocates memory for debugging bg buffers. - **/ -static void -lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) -{ - int pagecnt = 10; - if (lpfc_prot_mask && lpfc_prot_guard) { - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "1478 Registering BlockGuard with the " - "SCSI layer\n"); - scsi_host_set_prot(shost, lpfc_prot_mask); - scsi_host_set_guard(shost, lpfc_prot_guard); - } - if (!_dump_buf_data) { - while (pagecnt) { - spin_lock_init(&_dump_buf_lock); - _dump_buf_data = - (char *) __get_free_pages(GFP_KERNEL, pagecnt); - if (_dump_buf_data) { - printk(KERN_ERR "BLKGRD allocated %d pages for " - "_dump_buf_data at 0x%p\n", - (1 << pagecnt), _dump_buf_data); - _dump_buf_data_order = pagecnt; - memset(_dump_buf_data, 0, - ((1 << PAGE_SHIFT) << pagecnt)); - break; - } else - --pagecnt; - } - if (!_dump_buf_data_order) - printk(KERN_ERR "BLKGRD ERROR unable to allocate " - "memory for hexdump\n"); - } else - printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" - "\n", _dump_buf_data); - if (!_dump_buf_dif) { - while (pagecnt) { - _dump_buf_dif = - (char *) __get_free_pages(GFP_KERNEL, pagecnt); - if (_dump_buf_dif) { - printk(KERN_ERR "BLKGRD allocated %d pages for " - "_dump_buf_dif at 0x%p\n", - (1 << pagecnt), _dump_buf_dif); - _dump_buf_dif_order = pagecnt; - memset(_dump_buf_dif, 0, - ((1 << PAGE_SHIFT) << pagecnt)); - break; - } else - --pagecnt; - } - if (!_dump_buf_dif_order) - printk(KERN_ERR "BLKGRD ERROR unable to allocate " - "memory for hexdump\n"); - } else - printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", - _dump_buf_dif); -} - -/** - * lpfc_post_init_setup - Perform necessary device post initialization setup. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to perform all the necessary post initialization - * setup for the device. - **/ -static void -lpfc_post_init_setup(struct lpfc_hba *phba) -{ - struct Scsi_Host *shost; - struct lpfc_adapter_event_header adapter_event; - - /* Get the default values for Model Name and Description */ - lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); - - /* - * hba setup may have changed the hba_queue_depth so we need to - * adjust the value of can_queue. - */ - shost = pci_get_drvdata(phba->pcidev); - shost->can_queue = phba->cfg_hba_queue_depth - 10; - if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) - lpfc_setup_bg(phba, shost); - - lpfc_host_attrib_init(shost); - - if (phba->cfg_poll & DISABLE_FCP_RING_INT) { - spin_lock_irq(shost->host_lock); - lpfc_poll_start_timer(phba); - spin_unlock_irq(shost->host_lock); - } - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0428 Perform SCSI scan\n"); - /* Send board arrival event to upper layer */ - adapter_event.event_type = FC_REG_ADAPTER_EVENT; - adapter_event.subcategory = LPFC_EVENT_ARRIVAL; - fc_host_post_vendor_event(shost, fc_get_event_number(), - sizeof(adapter_event), - (char *) &adapter_event, - LPFC_NL_VENDOR_ID); - return; -} - -/** - * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to set up the PCI device memory space for device - * with SLI-3 interface spec. 
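
The debug-buffer loops in the removed lpfc_setup_bg() above rely on __get_free_pages() taking a page *order* (2^order pages), so decrementing the order on failure retries with half the size. Reduced to a helper with hypothetical foo names:

static char *foo_alloc_big_buf(int max_order, int *got_order)
{
	int order;

	for (order = max_order; order >= 0; order--) {
		char *buf = (char *)__get_free_pages(GFP_KERNEL, order);

		if (buf) {
			/* release later with free_pages((unsigned long)buf, order) */
			*got_order = order;
			return buf;
		}
	}
	return NULL;	/* not even a single page was available */
}
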
- * - * Return codes - * 0 - sucessful - * other values - error - **/ -static int -lpfc_sli_pci_mem_setup(struct lpfc_hba *phba) -{ - struct pci_dev *pdev; - unsigned long bar0map_len, bar2map_len; - int i, hbq_count; - void *ptr; - int error = -ENODEV; - - /* Obtain PCI device reference */ - if (!phba->pcidev) - return error; - else - pdev = phba->pcidev; - - /* Set the device DMA mask size */ - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) - return error; - - /* Get the bus address of Bar0 and Bar2 and the number of bytes - * required by each mapping. - */ - phba->pci_bar0_map = pci_resource_start(pdev, 0); - bar0map_len = pci_resource_len(pdev, 0); - - phba->pci_bar2_map = pci_resource_start(pdev, 2); - bar2map_len = pci_resource_len(pdev, 2); - - /* Map HBA SLIM to a kernel virtual address. */ - phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len); - if (!phba->slim_memmap_p) { - dev_printk(KERN_ERR, &pdev->dev, - "ioremap failed for SLIM memory.\n"); - goto out; - } - - /* Map HBA Control Registers to a kernel virtual address. */ - phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len); - if (!phba->ctrl_regs_memmap_p) { - dev_printk(KERN_ERR, &pdev->dev, - "ioremap failed for HBA control registers.\n"); - goto out_iounmap_slim; - } - - /* Allocate memory for SLI-2 structures */ - phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, - SLI2_SLIM_SIZE, - &phba->slim2p.phys, - GFP_KERNEL); - if (!phba->slim2p.virt) - goto out_iounmap; - - memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE); - phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); - phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb)); - phba->IOCBs = (phba->slim2p.virt + - offsetof(struct lpfc_sli2_slim, IOCBs)); - - phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev, - lpfc_sli_hbq_size(), - &phba->hbqslimp.phys, - GFP_KERNEL); - if (!phba->hbqslimp.virt) - goto out_free_slim; - - hbq_count = lpfc_sli_hbq_count(); - ptr = phba->hbqslimp.virt; - for (i = 0; i < hbq_count; ++i) { - phba->hbqs[i].hbq_virt = ptr; - INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list); - ptr += (lpfc_hbq_defs[i]->entry_count * - sizeof(struct lpfc_hbq_entry)); - } - phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc; - phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free; - - memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size()); - - INIT_LIST_HEAD(&phba->rb_pend_list); - - phba->MBslimaddr = phba->slim_memmap_p; - phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; - phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; - phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; - phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; - - return 0; - -out_free_slim: - dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, - phba->slim2p.virt, phba->slim2p.phys); -out_iounmap: - iounmap(phba->ctrl_regs_memmap_p); -out_iounmap_slim: - iounmap(phba->slim_memmap_p); -out: - return error; -} - -/** - * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to unset the PCI device memory space for device - * with SLI-3 interface spec. 
- **/ -static void -lpfc_sli_pci_mem_unset(struct lpfc_hba *phba) -{ - struct pci_dev *pdev; - - /* Obtain PCI device reference */ - if (!phba->pcidev) - return; - else - pdev = phba->pcidev; - - /* Free coherent DMA memory allocated */ - dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), - phba->hbqslimp.virt, phba->hbqslimp.phys); - dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, - phba->slim2p.virt, phba->slim2p.phys); - - /* I/O memory unmap */ - iounmap(phba->ctrl_regs_memmap_p); - iounmap(phba->slim_memmap_p); - - return; -} - -/** - * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to wait for SLI4 device Power On Self Test (POST) - * done and check status. - * - * Return 0 if successful, otherwise -ENODEV. - **/ -int -lpfc_sli4_post_status_check(struct lpfc_hba *phba) -{ - struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad; - uint32_t onlnreg0, onlnreg1; - int i, port_error = -ENODEV; - - if (!phba->sli4_hba.STAregaddr) - return -ENODEV; - - /* With uncoverable error, log the error message and return error */ - onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); - onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); - if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { - uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr); - uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr); - if (uerrlo_reg.word0 || uerrhi_reg.word0) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1422 HBA Unrecoverable error: " - "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " - "online0_reg=0x%x, online1_reg=0x%x\n", - uerrlo_reg.word0, uerrhi_reg.word0, - onlnreg0, onlnreg1); - } - return -ENODEV; - } - - /* Wait up to 30 seconds for the SLI Port POST done and ready */ - for (i = 0; i < 3000; i++) { - sta_reg.word0 = readl(phba->sli4_hba.STAregaddr); - /* Encounter fatal POST error, break out */ - if (bf_get(lpfc_hst_state_perr, &sta_reg)) { - port_error = -ENODEV; - break; - } - if (LPFC_POST_STAGE_ARMFW_READY == - bf_get(lpfc_hst_state_port_status, &sta_reg)) { - port_error = 0; - break; - } - msleep(10); - } - - if (port_error) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1408 Failure HBA POST Status: sta_reg=0x%x, " - "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, " - "dl=x%x, pstatus=x%x\n", sta_reg.word0, - bf_get(lpfc_hst_state_perr, &sta_reg), - bf_get(lpfc_hst_state_sfi, &sta_reg), - bf_get(lpfc_hst_state_nip, &sta_reg), - bf_get(lpfc_hst_state_ipc, &sta_reg), - bf_get(lpfc_hst_state_xrom, &sta_reg), - bf_get(lpfc_hst_state_dl, &sta_reg), - bf_get(lpfc_hst_state_port_status, &sta_reg)); - - /* Log device information */ - scratchpad.word0 = readl(phba->sli4_hba.SCRATCHPADregaddr); - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2534 Device Info: ChipType=0x%x, SliRev=0x%x, " - "FeatureL1=0x%x, FeatureL2=0x%x\n", - bf_get(lpfc_scratchpad_chiptype, &scratchpad), - bf_get(lpfc_scratchpad_slirev, &scratchpad), - bf_get(lpfc_scratchpad_featurelevel1, &scratchpad), - bf_get(lpfc_scratchpad_featurelevel2, &scratchpad)); - - return port_error; -} - -/** - * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to set up SLI4 BAR0 PCI config space register - * memory map. 
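
The POST wait above is a bounded poll: 3000 iterations of msleep(10) give the quoted 30-second ceiling, with distinct outcomes for ready, fatal error, and timeout. The skeleton, with hypothetical foo names and status bits:

static int foo_wait_ready(void __iomem *status_reg)
{
	int i;

	for (i = 0; i < 3000; i++) {		/* 3000 * 10ms = 30s */
		u32 status = readl(status_reg);

		if (status & FOO_STATUS_FATAL)	/* hypothetical bit */
			return -ENODEV;		/* give up immediately */
		if (status & FOO_STATUS_READY)	/* hypothetical bit */
			return 0;
		msleep(10);
	}
	return -ETIMEDOUT;
}
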
- **/ -static void -lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba) -{ - phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p + - LPFC_UERR_STATUS_LO; - phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p + - LPFC_UERR_STATUS_HI; - phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p + - LPFC_ONLINE0; - phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p + - LPFC_ONLINE1; - phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p + - LPFC_SCRATCHPAD; -} - -/** - * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to set up SLI4 BAR1 control status register (CSR) - * memory map. - **/ -static void -lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba) -{ - - phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p + - LPFC_HST_STATE; - phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + - LPFC_HST_ISR0; - phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + - LPFC_HST_IMR0; - phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p + - LPFC_HST_ISCR0; - return; -} - -/** - * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map. - * @phba: pointer to lpfc hba data structure. - * @vf: virtual function number - * - * This routine is invoked to set up SLI4 BAR2 doorbell register memory map - * based on the given viftual function number, @vf. - * - * Return 0 if successful, otherwise -ENODEV. - **/ -static int -lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf) -{ - if (vf > LPFC_VIR_FUNC_MAX) - return -ENODEV; - - phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + - vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL); - phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + - vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL); - phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + - vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL); - phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p + - vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL); - phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p + - vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX); - return 0; -} - -/** - * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to create the bootstrap mailbox - * region consistent with the SLI-4 interface spec. This - * routine allocates all memory necessary to communicate - * mailbox commands to the port and sets up all alignment - * needs. No locks are expected to be held when calling - * this routine. - * - * Return codes - * 0 - sucessful - * ENOMEM - could not allocated memory. - **/ -static int -lpfc_create_bootstrap_mbox(struct lpfc_hba *phba) -{ - uint32_t bmbx_size; - struct lpfc_dmabuf *dmabuf; - struct dma_address *dma_address; - uint32_t pa_addr; - uint64_t phys_addr; - - dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); - if (!dmabuf) - return -ENOMEM; - - /* - * The bootstrap mailbox region is comprised of 2 parts - * plus an alignment restriction of 16 bytes. 
-         */
-        bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
-        dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
-                                          bmbx_size,
-                                          &dmabuf->phys,
-                                          GFP_KERNEL);
-        if (!dmabuf->virt) {
-                kfree(dmabuf);
-                return -ENOMEM;
-        }
-        memset(dmabuf->virt, 0, bmbx_size);
-
-        /*
-         * Initialize the bootstrap mailbox pointers now so that the register
-         * operations are simple later.  The mailbox dma address is required
-         * to be 16-byte aligned.  Also align the virtual memory as each
-         * mailbox is copied into the bmbx mailbox region before issuing the
-         * command to the port.
-         */
-        phba->sli4_hba.bmbx.dmabuf = dmabuf;
-        phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
-
-        phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
-                                              LPFC_ALIGN_16_BYTE);
-        phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
-                                          LPFC_ALIGN_16_BYTE);
-
-        /*
-         * Set the high and low physical addresses now.  The SLI4 alignment
-         * requirement is 16 bytes and the mailbox is posted to the port
-         * as two 30-bit addresses.  The other data is a bit marking whether
-         * the 30-bit address is the high or low address.
-         * Upcast bmbx aphys to 64bits so the shift instruction compiles
-         * clean on 32 bit machines.
-         */
-        dma_address = &phba->sli4_hba.bmbx.dma_address;
-        phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
-        pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
-        dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
-                                           LPFC_BMBX_BIT1_ADDR_HI);
-
-        pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
-        dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
-                                           LPFC_BMBX_BIT1_ADDR_LO);
-        return 0;
-}
-
-/**
- * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to tear down the bootstrap mailbox
- * region and release all host resources.  This routine requires
- * the caller to ensure all mailbox commands have been recovered, no
- * additional mailbox commands are sent, and interrupts are disabled
- * before calling this routine.
- *
- **/
-static void
-lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
-{
-        dma_free_coherent(&phba->pcidev->dev,
-                          phba->sli4_hba.bmbx.bmbx_size,
-                          phba->sli4_hba.bmbx.dmabuf->virt,
-                          phba->sli4_hba.bmbx.dmabuf->phys);
-
-        kfree(phba->sli4_hba.bmbx.dmabuf);
-        memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
-}
-
-/**
- * lpfc_sli4_read_config - Get the config parameters.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to read the configuration parameters from the HBA.
- * The configuration parameters are used to set the base and maximum values
- * for RPIs, XRIs, VPIs, VFIs, and FCFIs.  These values also affect the
- * resource allocation for the port.
- *
- * Return codes
- *      0 - successful
- *      ENOMEM - No available memory
- *      EIO - The mailbox failed to complete successfully.
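- *
- * For example (assumed values, not from a real reply): rpi_base = 0 and
- * rpi_count = 64 would make RPIs 0..63 available to this function, and
- * the allocator below is primed with next_rpi = rpi_base accordingly.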
- **/ -static int -lpfc_sli4_read_config(struct lpfc_hba *phba) -{ - LPFC_MBOXQ_t *pmb; - struct lpfc_mbx_read_config *rd_config; - uint32_t rc = 0; - - pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!pmb) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2011 Unable to allocate memory for issuing " - "SLI_CONFIG_SPECIAL mailbox command\n"); - return -ENOMEM; - } - - lpfc_read_config(phba, pmb); - - rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); - if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2012 Mailbox failed , mbxCmd x%x " - "READ_CONFIG, mbxStatus x%x\n", - bf_get(lpfc_mqe_command, &pmb->u.mqe), - bf_get(lpfc_mqe_status, &pmb->u.mqe)); - rc = -EIO; - } else { - rd_config = &pmb->u.mqe.un.rd_config; - phba->sli4_hba.max_cfg_param.max_xri = - bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); - phba->sli4_hba.max_cfg_param.xri_base = - bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); - phba->sli4_hba.max_cfg_param.max_vpi = - bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config); - phba->sli4_hba.max_cfg_param.vpi_base = - bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config); - phba->sli4_hba.max_cfg_param.max_rpi = - bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config); - phba->sli4_hba.max_cfg_param.rpi_base = - bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config); - phba->sli4_hba.max_cfg_param.max_vfi = - bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config); - phba->sli4_hba.max_cfg_param.vfi_base = - bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config); - phba->sli4_hba.max_cfg_param.max_fcfi = - bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config); - phba->sli4_hba.max_cfg_param.fcfi_base = - bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config); - phba->sli4_hba.max_cfg_param.max_eq = - bf_get(lpfc_mbx_rd_conf_eq_count, rd_config); - phba->sli4_hba.max_cfg_param.max_rq = - bf_get(lpfc_mbx_rd_conf_rq_count, rd_config); - phba->sli4_hba.max_cfg_param.max_wq = - bf_get(lpfc_mbx_rd_conf_wq_count, rd_config); - phba->sli4_hba.max_cfg_param.max_cq = - bf_get(lpfc_mbx_rd_conf_cq_count, rd_config); - phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config); - phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base; - phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base; - phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base; - phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base; - phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi; - phba->max_vports = phba->max_vpi; - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "2003 cfg params XRI(B:%d M:%d), " - "VPI(B:%d M:%d) " - "VFI(B:%d M:%d) " - "RPI(B:%d M:%d) " - "FCFI(B:%d M:%d)\n", - phba->sli4_hba.max_cfg_param.xri_base, - phba->sli4_hba.max_cfg_param.max_xri, - phba->sli4_hba.max_cfg_param.vpi_base, - phba->sli4_hba.max_cfg_param.max_vpi, - phba->sli4_hba.max_cfg_param.vfi_base, - phba->sli4_hba.max_cfg_param.max_vfi, - phba->sli4_hba.max_cfg_param.rpi_base, - phba->sli4_hba.max_cfg_param.max_rpi, - phba->sli4_hba.max_cfg_param.fcfi_base, - phba->sli4_hba.max_cfg_param.max_fcfi); - } - mempool_free(pmb, phba->mbox_mem_pool); - - /* Reset the DFT_HBA_Q_DEPTH to the max xri */ - if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri)) - phba->cfg_hba_queue_depth = - phba->sli4_hba.max_cfg_param.max_xri; - return rc; -} - -/** - * lpfc_dev_endian_order_setup - Notify the port of the host's endian order. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to setup the host-side endian order to the - * HBA consistent with the SLI-4 interface spec. 
- *
- * Return codes
- *      0 - successful
- *      ENOMEM - No available memory
- *      EIO - The mailbox failed to complete successfully.
- **/
-static int
-lpfc_setup_endian_order(struct lpfc_hba *phba)
-{
-        LPFC_MBOXQ_t *mboxq;
-        uint32_t rc = 0;
-        uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
-                                      HOST_ENDIAN_HIGH_WORD1};
-
-        mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-        if (!mboxq) {
-                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                "0492 Unable to allocate memory for issuing "
-                                "SLI_CONFIG_SPECIAL mailbox command\n");
-                return -ENOMEM;
-        }
-
-        /*
-         * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
-         * words to contain special data values and no other data.
-         */
-        memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
-        memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
-        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
-        if (rc != MBX_SUCCESS) {
-                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                "0493 SLI_CONFIG_SPECIAL mailbox failed with "
-                                "status x%x\n",
-                                rc);
-                rc = -EIO;
-        }
-
-        mempool_free(mboxq, phba->mbox_mem_pool);
-        return rc;
-}
-
-/**
- * lpfc_sli4_queue_create - Create all the SLI4 queues
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
- * operation.  For each SLI4 queue type, the parameters such as queue entry
- * count (queue depth) shall be taken from the module parameter.  For now,
- * we just use some constant number as a placeholder.
- *
- * Return codes
- *      0 - successful
- *      ENOMEM - No available memory
- *      EIO - The mailbox failed to complete successfully.
- **/
-static int
-lpfc_sli4_queue_create(struct lpfc_hba *phba)
-{
-        struct lpfc_queue *qdesc;
-        int fcp_eqidx, fcp_cqidx, fcp_wqidx;
-        int cfg_fcp_wq_count;
-        int cfg_fcp_eq_count;
-
-        /*
-         * Sanity check for configured queue parameters against the run-time
-         * device parameters
-         */
-
-        /* Sanity check on FCP fast-path WQ parameters */
-        cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
-        if (cfg_fcp_wq_count >
-            (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
-                cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
-                                   LPFC_SP_WQN_DEF;
-                if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
-                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                        "2581 Not enough WQs (%d) from "
-                                        "the pci function for supporting "
-                                        "FCP WQs (%d)\n",
-                                        phba->sli4_hba.max_cfg_param.max_wq,
-                                        phba->cfg_fcp_wq_count);
-                        goto out_error;
-                }
-                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-                                "2582 Not enough WQs (%d) from the pci "
-                                "function for supporting the requested "
-                                "FCP WQs (%d), the actual FCP WQs can "
-                                "be supported: %d\n",
-                                phba->sli4_hba.max_cfg_param.max_wq,
-                                phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
-        }
-        /* The actual number of FCP work queues adopted */
-        phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
-
-        /* Sanity check on FCP fast-path EQ parameters */
-        cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
-        if (cfg_fcp_eq_count >
-            (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
-                cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
-                                   LPFC_SP_EQN_DEF;
-                if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
-                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                        "2574 Not enough EQs (%d) from the "
-                                        "pci function for supporting FCP "
-                                        "EQs (%d)\n",
-                                        phba->sli4_hba.max_cfg_param.max_eq,
-                                        phba->cfg_fcp_eq_count);
-                        goto out_error;
-                }
-                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-                                "2575 Not enough EQs (%d) from the pci "
-                                "function for supporting the requested "
-                                "FCP EQs (%d), the actual FCP EQs can "
-                                "be supported: %d\n",
phba->sli4_hba.max_cfg_param.max_eq, - phba->cfg_fcp_eq_count, cfg_fcp_eq_count); - } - /* It does not make sense to have more EQs than WQs */ - if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "2593 The number of FCP EQs (%d) is more " - "than the number of FCP WQs (%d), take " - "the number of FCP EQs same as than of " - "WQs (%d)\n", cfg_fcp_eq_count, - phba->cfg_fcp_wq_count, - phba->cfg_fcp_wq_count); - cfg_fcp_eq_count = phba->cfg_fcp_wq_count; - } - /* The actual number of FCP event queues adopted */ - phba->cfg_fcp_eq_count = cfg_fcp_eq_count; - /* The overall number of event queues used */ - phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; - - /* - * Create Event Queues (EQs) - */ - - /* Get EQ depth from module parameter, fake the default for now */ - phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; - phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; - - /* Create slow path event queue */ - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, - phba->sli4_hba.eq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0496 Failed allocate slow-path EQ\n"); - goto out_error; - } - phba->sli4_hba.sp_eq = qdesc; - - /* Create fast-path FCP Event Queue(s) */ - phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * - phba->cfg_fcp_eq_count), GFP_KERNEL); - if (!phba->sli4_hba.fp_eq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2576 Failed allocate memory for fast-path " - "EQ record array\n"); - goto out_free_sp_eq; - } - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, - phba->sli4_hba.eq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0497 Failed allocate fast-path EQ\n"); - goto out_free_fp_eq; - } - phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc; - } - - /* - * Create Complete Queues (CQs) - */ - - /* Get CQ depth from module parameter, fake the default for now */ - phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; - phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; - - /* Create slow-path Mailbox Command Complete Queue */ - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, - phba->sli4_hba.cq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0500 Failed allocate slow-path mailbox CQ\n"); - goto out_free_fp_eq; - } - phba->sli4_hba.mbx_cq = qdesc; - - /* Create slow-path ELS Complete Queue */ - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, - phba->sli4_hba.cq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0501 Failed allocate slow-path ELS CQ\n"); - goto out_free_mbx_cq; - } - phba->sli4_hba.els_cq = qdesc; - - /* Create slow-path Unsolicited Receive Complete Queue */ - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, - phba->sli4_hba.cq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0502 Failed allocate slow-path USOL RX CQ\n"); - goto out_free_els_cq; - } - phba->sli4_hba.rxq_cq = qdesc; - - /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ - phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * - phba->cfg_fcp_eq_count), GFP_KERNEL); - if (!phba->sli4_hba.fcp_cq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2577 Failed allocate memory for fast-path " - "CQ record array\n"); - goto out_free_rxq_cq; - } - for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, - 
phba->sli4_hba.cq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0499 Failed allocate fast-path FCP " - "CQ (%d)\n", fcp_cqidx); - goto out_free_fcp_cq; - } - phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; - } - - /* Create Mailbox Command Queue */ - phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; - phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT; - - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize, - phba->sli4_hba.mq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0505 Failed allocate slow-path MQ\n"); - goto out_free_fcp_cq; - } - phba->sli4_hba.mbx_wq = qdesc; - - /* - * Create all the Work Queues (WQs) - */ - phba->sli4_hba.wq_esize = LPFC_WQE_SIZE; - phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT; - - /* Create slow-path ELS Work Queue */ - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, - phba->sli4_hba.wq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0504 Failed allocate slow-path ELS WQ\n"); - goto out_free_mbx_wq; - } - phba->sli4_hba.els_wq = qdesc; - - /* Create fast-path FCP Work Queue(s) */ - phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) * - phba->cfg_fcp_wq_count), GFP_KERNEL); - if (!phba->sli4_hba.fcp_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2578 Failed allocate memory for fast-path " - "WQ record array\n"); - goto out_free_els_wq; - } - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, - phba->sli4_hba.wq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0503 Failed allocate fast-path FCP " - "WQ (%d)\n", fcp_wqidx); - goto out_free_fcp_wq; - } - phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc; - } - - /* - * Create Receive Queue (RQ) - */ - phba->sli4_hba.rq_esize = LPFC_RQE_SIZE; - phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT; - - /* Create Receive Queue for header */ - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, - phba->sli4_hba.rq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0506 Failed allocate receive HRQ\n"); - goto out_free_fcp_wq; - } - phba->sli4_hba.hdr_rq = qdesc; - - /* Create Receive Queue for data */ - qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize, - phba->sli4_hba.rq_ecount); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0507 Failed allocate receive DRQ\n"); - goto out_free_hdr_rq; - } - phba->sli4_hba.dat_rq = qdesc; - - return 0; - -out_free_hdr_rq: - lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq); - phba->sli4_hba.hdr_rq = NULL; -out_free_fcp_wq: - for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) { - lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]); - phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL; - } - kfree(phba->sli4_hba.fcp_wq); -out_free_els_wq: - lpfc_sli4_queue_free(phba->sli4_hba.els_wq); - phba->sli4_hba.els_wq = NULL; -out_free_mbx_wq: - lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq); - phba->sli4_hba.mbx_wq = NULL; -out_free_fcp_cq: - for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) { - lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]); - phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL; - } - kfree(phba->sli4_hba.fcp_cq); -out_free_rxq_cq: - lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq); - phba->sli4_hba.rxq_cq = NULL; -out_free_els_cq: - lpfc_sli4_queue_free(phba->sli4_hba.els_cq); - phba->sli4_hba.els_cq = NULL; -out_free_mbx_cq: - lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq); - phba->sli4_hba.mbx_cq = NULL; -out_free_fp_eq: - for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) { 
-                lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
-                phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
-        }
-        kfree(phba->sli4_hba.fp_eq);
-out_free_sp_eq:
-        lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
-        phba->sli4_hba.sp_eq = NULL;
-out_error:
-        return -ENOMEM;
-}
-
-/**
- * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to release all the SLI4 queues for the FCoE HBA
- * operation.
- **/
-static void
-lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
-{
-        int fcp_qidx;
-
-        /* Release mailbox command work queue */
-        lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
-        phba->sli4_hba.mbx_wq = NULL;
-
-        /* Release ELS work queue */
-        lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
-        phba->sli4_hba.els_wq = NULL;
-
-        /* Release FCP work queue */
-        for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
-                lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
-        kfree(phba->sli4_hba.fcp_wq);
-        phba->sli4_hba.fcp_wq = NULL;
-
-        /* Release unsolicited receive queue */
-        lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
-        phba->sli4_hba.hdr_rq = NULL;
-        lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
-        phba->sli4_hba.dat_rq = NULL;
-
-        /* Release unsolicited receive complete queue */
-        lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
-        phba->sli4_hba.rxq_cq = NULL;
-
-        /* Release ELS complete queue */
-        lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
-        phba->sli4_hba.els_cq = NULL;
-
-        /* Release mailbox command complete queue */
-        lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
-        phba->sli4_hba.mbx_cq = NULL;
-
-        /* Release FCP response complete queue */
-        for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
-                lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
-        kfree(phba->sli4_hba.fcp_cq);
-        phba->sli4_hba.fcp_cq = NULL;
-
-        /* Release fast-path event queue */
-        for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
-                lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
-        kfree(phba->sli4_hba.fp_eq);
-        phba->sli4_hba.fp_eq = NULL;
-
-        /* Release slow-path event queue */
-        lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
-        phba->sli4_hba.sp_eq = NULL;
-
-        return;
-}
-
-/**
- * lpfc_sli4_queue_setup - Set up all the SLI4 queues
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
- * operation.
- *
- * Return codes
- *      0 - successful
- *      ENOMEM - No available memory
- *      EIO - The mailbox failed to complete successfully.
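- *
- * A minimal sketch of the ordering this routine follows (illustrative
- * only; the real calls below use the full phba->sli4_hba names and
- * check every return code): a CQ needs its parent EQ first, and a
- * WQ/MQ/RQ needs its parent CQ first, e.g.
- *
- *      lpfc_eq_create(phba, sp_eq, LPFC_SP_DEF_IMAX);
- *      lpfc_cq_create(phba, els_cq, sp_eq, LPFC_WCQ, LPFC_ELS);
- *      lpfc_wq_create(phba, els_wq, els_cq, LPFC_ELS);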
- **/ -int -lpfc_sli4_queue_setup(struct lpfc_hba *phba) -{ - int rc = -ENOMEM; - int fcp_eqidx, fcp_cqidx, fcp_wqidx; - int fcp_cq_index = 0; - - /* - * Set up Event Queues (EQs) - */ - - /* Set up slow-path event queue */ - if (!phba->sli4_hba.sp_eq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0520 Slow-path EQ not allocated\n"); - goto out_error; - } - rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq, - LPFC_SP_DEF_IMAX); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0521 Failed setup of slow-path EQ: " - "rc = 0x%x\n", rc); - goto out_error; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2583 Slow-path EQ setup: queue-id=%d\n", - phba->sli4_hba.sp_eq->queue_id); - - /* Set up fast-path event queue */ - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { - if (!phba->sli4_hba.fp_eq[fcp_eqidx]) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0522 Fast-path EQ (%d) not " - "allocated\n", fcp_eqidx); - goto out_destroy_fp_eq; - } - rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx], - phba->cfg_fcp_imax); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0523 Failed setup of fast-path EQ " - "(%d), rc = 0x%x\n", fcp_eqidx, rc); - goto out_destroy_fp_eq; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2584 Fast-path EQ setup: " - "queue[%d]-id=%d\n", fcp_eqidx, - phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id); - } - - /* - * Set up Complete Queues (CQs) - */ - - /* Set up slow-path MBOX Complete Queue as the first CQ */ - if (!phba->sli4_hba.mbx_cq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0528 Mailbox CQ not allocated\n"); - goto out_destroy_fp_eq; - } - rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq, - LPFC_MCQ, LPFC_MBOX); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0529 Failed setup of slow-path mailbox CQ: " - "rc = 0x%x\n", rc); - goto out_destroy_fp_eq; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n", - phba->sli4_hba.mbx_cq->queue_id, - phba->sli4_hba.sp_eq->queue_id); - - /* Set up slow-path ELS Complete Queue */ - if (!phba->sli4_hba.els_cq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0530 ELS CQ not allocated\n"); - goto out_destroy_mbx_cq; - } - rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq, - LPFC_WCQ, LPFC_ELS); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0531 Failed setup of slow-path ELS CQ: " - "rc = 0x%x\n", rc); - goto out_destroy_mbx_cq; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n", - phba->sli4_hba.els_cq->queue_id, - phba->sli4_hba.sp_eq->queue_id); - - /* Set up slow-path Unsolicited Receive Complete Queue */ - if (!phba->sli4_hba.rxq_cq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0532 USOL RX CQ not allocated\n"); - goto out_destroy_els_cq; - } - rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq, - LPFC_RCQ, LPFC_USOL); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0533 Failed setup of slow-path USOL RX CQ: " - "rc = 0x%x\n", rc); - goto out_destroy_els_cq; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n", - phba->sli4_hba.rxq_cq->queue_id, - phba->sli4_hba.sp_eq->queue_id); - - /* Set up fast-path FCP Response Complete Queue */ - for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { - if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0526 Fast-path FCP CQ (%d) not " - 
"allocated\n", fcp_cqidx); - goto out_destroy_fcp_cq; - } - rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx], - phba->sli4_hba.fp_eq[fcp_cqidx], - LPFC_WCQ, LPFC_FCP); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0527 Failed setup of fast-path FCP " - "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc); - goto out_destroy_fcp_cq; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2588 FCP CQ setup: cq[%d]-id=%d, " - "parent eq[%d]-id=%d\n", - fcp_cqidx, - phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id, - fcp_cqidx, - phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id); - } - - /* - * Set up all the Work Queues (WQs) - */ - - /* Set up Mailbox Command Queue */ - if (!phba->sli4_hba.mbx_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0538 Slow-path MQ not allocated\n"); - goto out_destroy_fcp_cq; - } - rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq, - phba->sli4_hba.mbx_cq, LPFC_MBOX); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0539 Failed setup of slow-path MQ: " - "rc = 0x%x\n", rc); - goto out_destroy_fcp_cq; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n", - phba->sli4_hba.mbx_wq->queue_id, - phba->sli4_hba.mbx_cq->queue_id); - - /* Set up slow-path ELS Work Queue */ - if (!phba->sli4_hba.els_wq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0536 Slow-path ELS WQ not allocated\n"); - goto out_destroy_mbx_wq; - } - rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq, - phba->sli4_hba.els_cq, LPFC_ELS); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0537 Failed setup of slow-path ELS WQ: " - "rc = 0x%x\n", rc); - goto out_destroy_mbx_wq; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n", - phba->sli4_hba.els_wq->queue_id, - phba->sli4_hba.els_cq->queue_id); - - /* Set up fast-path FCP Work Queue */ - for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) { - if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0534 Fast-path FCP WQ (%d) not " - "allocated\n", fcp_wqidx); - goto out_destroy_fcp_wq; - } - rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx], - phba->sli4_hba.fcp_cq[fcp_cq_index], - LPFC_FCP); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0535 Failed setup of fast-path FCP " - "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc); - goto out_destroy_fcp_wq; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2591 FCP WQ setup: wq[%d]-id=%d, " - "parent cq[%d]-id=%d\n", - fcp_wqidx, - phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id, - fcp_cq_index, - phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id); - /* Round robin FCP Work Queue's Completion Queue assignment */ - fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count); - } - - /* - * Create Receive Queue (RQ) - */ - if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0540 Receive Queue not allocated\n"); - goto out_destroy_fcp_wq; - } - rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, - phba->sli4_hba.rxq_cq, LPFC_USOL); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0541 Failed setup of Receive Queue: " - "rc = 0x%x\n", rc); - goto out_destroy_fcp_wq; - } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " - "parent cq-id=%d\n", - phba->sli4_hba.hdr_rq->queue_id, - phba->sli4_hba.dat_rq->queue_id, - phba->sli4_hba.rxq_cq->queue_id); - return 0; - -out_destroy_fcp_wq: - for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) 
-                lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
-        lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
-out_destroy_mbx_wq:
-        lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
-out_destroy_fcp_cq:
-        for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
-                lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
-        lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
-out_destroy_els_cq:
-        lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
-out_destroy_mbx_cq:
-        lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
-out_destroy_fp_eq:
-        for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
-                lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
-        lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
-out_error:
-        return rc;
-}
-
-/**
- * lpfc_sli4_queue_unset - Unset all the SLI4 queues
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to unset all the SLI4 queues for the FCoE HBA
- * operation.
- **/
-void
-lpfc_sli4_queue_unset(struct lpfc_hba *phba)
-{
-        int fcp_qidx;
-
-        /* Unset mailbox command work queue */
-        lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
-        /* Unset ELS work queue */
-        lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
-        /* Unset unsolicited receive queue */
-        lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
-        /* Unset FCP work queue */
-        for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
-                lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
-        /* Unset mailbox command complete queue */
-        lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
-        /* Unset ELS complete queue */
-        lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
-        /* Unset unsolicited receive complete queue */
-        lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
-        /* Unset FCP response complete queue */
-        for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
-                lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
-        /* Unset fast-path event queue */
-        for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
-                lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
-        /* Unset slow-path event queue */
-        lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
-}
-
-/**
- * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to allocate and set up a pool of completion queue
- * events.  The body of the completion queue event is a completion queue
- * entry (CQE).  For now, this pool is used for the interrupt service routine
- * to queue the following HBA completion queue events for the worker thread
- * to process:
- *   - Mailbox asynchronous events
- *   - Receive queue completion unsolicited events
- * Later, this can be used for all the slow-path events.
- *
- * Return codes
- *      0 - successful
- *      -ENOMEM - No available memory
- **/
-static int
-lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
-{
-        struct lpfc_cq_event *cq_event;
-        int i;
-
-        for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
-                cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
-                if (!cq_event)
-                        goto out_pool_create_fail;
-                list_add_tail(&cq_event->list,
-                              &phba->sli4_hba.sp_cqe_event_pool);
-        }
-        return 0;
-
-out_pool_create_fail:
-        lpfc_sli4_cq_event_pool_destroy(phba);
-        return -ENOMEM;
-}
-
-/**
- * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to free the pool of completion queue events at
- * driver unload time.  Note that it is the responsibility of the driver
- * cleanup routine to free all the outstanding completion-queue events
- * allocated from this pool back into the pool before invoking this routine
- * to destroy the pool.
- **/
-static void
-lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
-{
-        struct lpfc_cq_event *cq_event, *next_cq_event;
-
-        list_for_each_entry_safe(cq_event, next_cq_event,
-                                 &phba->sli4_hba.sp_cqe_event_pool, list) {
-                list_del(&cq_event->list);
-                kfree(cq_event);
-        }
-}
-
-/**
- * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is the lock free version of the API invoked to allocate a
- * completion-queue event from the free pool.
- *
- * Return: Pointer to the newly allocated completion-queue event if successful
- *         NULL otherwise.
- **/
-struct lpfc_cq_event *
-__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
-{
-        struct lpfc_cq_event *cq_event = NULL;
-
-        list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
-                         struct lpfc_cq_event, list);
-        return cq_event;
-}
-
-/**
- * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is the locked version of the API invoked to allocate a
- * completion-queue event from the free pool.
- *
- * Return: Pointer to the newly allocated completion-queue event if successful
- *         NULL otherwise.
- **/
-struct lpfc_cq_event *
-lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
-{
-        struct lpfc_cq_event *cq_event;
-        unsigned long iflags;
-
-        spin_lock_irqsave(&phba->hbalock, iflags);
-        cq_event = __lpfc_sli4_cq_event_alloc(phba);
-        spin_unlock_irqrestore(&phba->hbalock, iflags);
-        return cq_event;
-}
-
-/**
- * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
- * @phba: pointer to lpfc hba data structure.
- * @cq_event: pointer to the completion queue event to be freed.
- *
- * This routine is the lock free version of the API invoked to release a
- * completion-queue event back into the free pool.
- **/
-void
-__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
-                             struct lpfc_cq_event *cq_event)
-{
-        list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
-}
-
-/**
- * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
- * @phba: pointer to lpfc hba data structure.
- * @cq_event: pointer to the completion queue event to be freed.
- *
- * This routine is the locked version of the API invoked to release a
- * completion-queue event back into the free pool.
- **/
-void
-lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
-                           struct lpfc_cq_event *cq_event)
-{
-        unsigned long iflags;
-        spin_lock_irqsave(&phba->hbalock, iflags);
-        __lpfc_sli4_cq_event_release(phba, cq_event);
-        spin_unlock_irqrestore(&phba->hbalock, iflags);
-}
-
-/**
- * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to free all the pending completion-queue events
- * back into the free pool for device reset.
- **/
-static void
-lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
-{
-        LIST_HEAD(cqelist);
-        struct lpfc_cq_event *cqe;
-        unsigned long iflags;
-
-        /* Retrieve all the pending WCQEs from pending WCQE lists */
-        spin_lock_irqsave(&phba->hbalock, iflags);
-        /* Pending FCP XRI abort events */
-        list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
-                         &cqelist);
-        /* Pending ELS XRI abort events */
-        list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
-                         &cqelist);
-        /* Pending async events */
-        list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
-                         &cqelist);
-        spin_unlock_irqrestore(&phba->hbalock, iflags);
-
-        while (!list_empty(&cqelist)) {
-                list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
-                lpfc_sli4_cq_event_release(phba, cqe);
-        }
-}
-
-/**
- * lpfc_pci_function_reset - Reset pci function.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to request a PCI function reset.  It destroys
- * all resources assigned to the PCI function which originates this request.
- *
- * Return codes
- *      0 - successful
- *      ENOMEM - No available memory
- *      EIO - The mailbox failed to complete successfully.
- **/
-int
-lpfc_pci_function_reset(struct lpfc_hba *phba)
-{
-        LPFC_MBOXQ_t *mboxq;
-        uint32_t rc = 0;
-        uint32_t shdr_status, shdr_add_status;
-        union lpfc_sli4_cfg_shdr *shdr;
-
-        mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-        if (!mboxq) {
-                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                "0494 Unable to allocate memory for issuing "
-                                "SLI_FUNCTION_RESET mailbox command\n");
-                return -ENOMEM;
-        }
-
-        /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
-        lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
-                         LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
-                         LPFC_SLI4_MBX_EMBED);
-        rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
-        shdr = (union lpfc_sli4_cfg_shdr *)
-                &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
-        shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
-        shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
-        if (rc != MBX_TIMEOUT)
-                mempool_free(mboxq, phba->mbox_mem_pool);
-        if (shdr_status || shdr_add_status || rc) {
-                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                "0495 SLI_FUNCTION_RESET mailbox failed with "
-                                "status x%x add_status x%x, mbx status x%x\n",
-                                shdr_status, shdr_add_status, rc);
-                rc = -ENXIO;
-        }
-        return rc;
-}
-
-/**
- * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
- * @phba: pointer to lpfc hba data structure.
- * @cnt: number of nop mailbox commands to send.
- *
- * This routine is invoked to send a number @cnt of NOP mailbox commands and
- * wait for each command to complete.
- *
- * Return: the number of NOP mailbox commands completed.
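- *
- * Illustrative caller-side check (the variable names are assumed):
- *
- *      sent = lpfc_sli4_send_nop_mbox_cmds(phba, nop_cnt);
- *      if (sent != nop_cnt)
- *              ... treat the shortfall as an initialization error ...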
- **/ -static int -lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) -{ - LPFC_MBOXQ_t *mboxq; - int length, cmdsent; - uint32_t mbox_tmo; - uint32_t rc = 0; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - - if (cnt == 0) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "2518 Requested to send 0 NOP mailbox cmd\n"); - return cnt; - } - - mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mboxq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2519 Unable to allocate memory for issuing " - "NOP mailbox command\n"); - return 0; - } - - /* Set up NOP SLI4_CONFIG mailbox-ioctl command */ - length = (sizeof(struct lpfc_mbx_nop) - - sizeof(struct lpfc_sli4_cfg_mhdr)); - lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, - LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); - - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); - for (cmdsent = 0; cmdsent < cnt; cmdsent++) { - if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - else - rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); - if (rc == MBX_TIMEOUT) - break; - /* Check return status */ - shdr = (union lpfc_sli4_cfg_shdr *) - &mboxq->u.mqe.un.sli4_config.header.cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, - &shdr->response); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "2520 NOP mailbox command failed " - "status x%x add_status x%x mbx " - "status x%x\n", shdr_status, - shdr_add_status, rc); - break; - } - } - - if (rc != MBX_TIMEOUT) - mempool_free(mboxq, phba->mbox_mem_pool); - - return cmdsent; -} - -/** - * lpfc_sli4_fcfi_unreg - Unregister fcfi to device - * @phba: pointer to lpfc hba data structure. - * @fcfi: fcf index. - * - * This routine is invoked to unregister a FCFI from device. - **/ -void -lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi) -{ - LPFC_MBOXQ_t *mbox; - uint32_t mbox_tmo; - int rc; - unsigned long flags; - - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - - if (!mbox) - return; - - lpfc_unreg_fcfi(mbox, fcfi); - - if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - else { - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); - rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); - } - if (rc != MBX_TIMEOUT) - mempool_free(mbox, phba->mbox_mem_pool); - if (rc != MBX_SUCCESS) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2517 Unregister FCFI command failed " - "status %d, mbxStatus x%x\n", rc, - bf_get(lpfc_mqe_status, &mbox->u.mqe)); - else { - spin_lock_irqsave(&phba->hbalock, flags); - /* Mark the FCFI is no longer registered */ - phba->fcf.fcf_flag &= - ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED); - spin_unlock_irqrestore(&phba->hbalock, flags); - } -} - -/** - * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to set up the PCI device memory space for device - * with SLI-4 interface spec. 
- * - * Return codes - * 0 - sucessful - * other values - error - **/ -static int -lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) -{ - struct pci_dev *pdev; - unsigned long bar0map_len, bar1map_len, bar2map_len; - int error = -ENODEV; - - /* Obtain PCI device reference */ - if (!phba->pcidev) - return error; - else - pdev = phba->pcidev; - - /* Set the device DMA mask size */ - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) - return error; - - /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the - * number of bytes required by each mapping. They are actually - * mapping to the PCI BAR regions 1, 2, and 4 by the SLI4 device. - */ - phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0); - bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0); - - phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1); - bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1); - - phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2); - bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2); - - /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */ - phba->sli4_hba.conf_regs_memmap_p = - ioremap(phba->pci_bar0_map, bar0map_len); - if (!phba->sli4_hba.conf_regs_memmap_p) { - dev_printk(KERN_ERR, &pdev->dev, - "ioremap failed for SLI4 PCI config registers.\n"); - goto out; - } - - /* Map SLI4 HBA Control Register base to a kernel virtual address. */ - phba->sli4_hba.ctrl_regs_memmap_p = - ioremap(phba->pci_bar1_map, bar1map_len); - if (!phba->sli4_hba.ctrl_regs_memmap_p) { - dev_printk(KERN_ERR, &pdev->dev, - "ioremap failed for SLI4 HBA control registers.\n"); - goto out_iounmap_conf; - } - - /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */ - phba->sli4_hba.drbl_regs_memmap_p = - ioremap(phba->pci_bar2_map, bar2map_len); - if (!phba->sli4_hba.drbl_regs_memmap_p) { - dev_printk(KERN_ERR, &pdev->dev, - "ioremap failed for SLI4 HBA doorbell registers.\n"); - goto out_iounmap_ctrl; - } - - /* Set up BAR0 PCI config space register memory map */ - lpfc_sli4_bar0_register_memmap(phba); - - /* Set up BAR1 register memory map */ - lpfc_sli4_bar1_register_memmap(phba); - - /* Set up BAR2 register memory map */ - error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); - if (error) - goto out_iounmap_all; - - return 0; - -out_iounmap_all: - iounmap(phba->sli4_hba.drbl_regs_memmap_p); -out_iounmap_ctrl: - iounmap(phba->sli4_hba.ctrl_regs_memmap_p); -out_iounmap_conf: - iounmap(phba->sli4_hba.conf_regs_memmap_p); -out: - return error; -} - -/** - * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to unset the PCI device memory space for device - * with SLI-4 interface spec. - **/ -static void -lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) -{ - struct pci_dev *pdev; - - /* Obtain PCI device reference */ - if (!phba->pcidev) - return; - else - pdev = phba->pcidev; - - /* Free coherent DMA memory allocated */ - - /* Unmap I/O memory space */ - iounmap(phba->sli4_hba.drbl_regs_memmap_p); - iounmap(phba->sli4_hba.ctrl_regs_memmap_p); - iounmap(phba->sli4_hba.conf_regs_memmap_p); - - return; -} - -/** - * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to enable the MSI-X interrupt vectors to device - * with SLI-3 interface specs. The kernel function pci_enable_msix() is - * called to enable the MSI-X vectors. 
Note that pci_enable_msix(), once - * invoked, enables either all or nothing, depending on the current - * availability of PCI vector resources. The device driver is responsible - * for calling the individual request_irq() to register each MSI-X vector - * with a interrupt handler, which is done in this function. Note that - * later when device is unloading, the driver should always call free_irq() - * on all MSI-X vectors it has done request_irq() on before calling - * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device - * will be left with MSI-X enabled and leaks its vectors. - * - * Return codes - * 0 - sucessful - * other values - error - **/ -static int -lpfc_sli_enable_msix(struct lpfc_hba *phba) -{ - int rc, i; - LPFC_MBOXQ_t *pmb; - - /* Set up MSI-X multi-message vectors */ - for (i = 0; i < LPFC_MSIX_VECTORS; i++) - phba->msix_entries[i].entry = i; - - /* Configure MSI-X capability structure */ - rc = pci_enable_msix(phba->pcidev, phba->msix_entries, - ARRAY_SIZE(phba->msix_entries)); - if (rc) { - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0420 PCI enable MSI-X failed (%d)\n", rc); - goto msi_fail_out; - } - for (i = 0; i < LPFC_MSIX_VECTORS; i++) - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0477 MSI-X entry[%d]: vector=x%x " - "message=%d\n", i, - phba->msix_entries[i].vector, - phba->msix_entries[i].entry); - /* - * Assign MSI-X vectors to interrupt handlers - */ - - /* vector-0 is associated to slow-path handler */ - rc = request_irq(phba->msix_entries[0].vector, - &lpfc_sli_sp_intr_handler, IRQF_SHARED, - LPFC_SP_DRIVER_HANDLER_NAME, phba); - if (rc) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "0421 MSI-X slow-path request_irq failed " - "(%d)\n", rc); - goto msi_fail_out; - } - - /* vector-1 is associated to fast-path handler */ - rc = request_irq(phba->msix_entries[1].vector, - &lpfc_sli_fp_intr_handler, IRQF_SHARED, - LPFC_FP_DRIVER_HANDLER_NAME, phba); - - if (rc) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "0429 MSI-X fast-path request_irq failed " - "(%d)\n", rc); - goto irq_fail_out; - } - - /* - * Configure HBA MSI-X attention conditions to messages - */ - pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - - if (!pmb) { - rc = -ENOMEM; - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0474 Unable to allocate memory for issuing " - "MBOX_CONFIG_MSI command\n"); - goto mem_fail_out; - } - rc = lpfc_config_msi(phba, pmb); - if (rc) - goto mbx_fail_out; - rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL); - if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, - "0351 Config MSI mailbox command failed, " - "mbxCmd x%x, mbxStatus x%x\n", - pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus); - goto mbx_fail_out; - } - - /* Free memory allocated for mailbox command */ - mempool_free(pmb, phba->mbox_mem_pool); - return rc; - -mbx_fail_out: - /* Free memory allocated for mailbox command */ - mempool_free(pmb, phba->mbox_mem_pool); - -mem_fail_out: - /* free the irq already requested */ - free_irq(phba->msix_entries[1].vector, phba); - -irq_fail_out: - /* free the irq already requested */ - free_irq(phba->msix_entries[0].vector, phba); - -msi_fail_out: - /* Unconfigure MSI-X capability structure */ - pci_disable_msix(phba->pcidev); - return rc; -} - -/** - * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device. - * @phba: pointer to lpfc hba data structure. 
- *
- * This routine is invoked to release the MSI-X vectors and then disable the
- * MSI-X interrupt mode to device with SLI-3 interface spec.
- **/
-static void
-lpfc_sli_disable_msix(struct lpfc_hba *phba)
-{
-        int i;
-
-        /* Free up MSI-X multi-message vectors */
-        for (i = 0; i < LPFC_MSIX_VECTORS; i++)
-                free_irq(phba->msix_entries[i].vector, phba);
-        /* Disable MSI-X */
-        pci_disable_msix(phba->pcidev);
-
-        return;
-}
-
-/**
- * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to enable the MSI interrupt mode to device with
- * SLI-3 interface spec.  The kernel function pci_enable_msi() is called to
- * enable the MSI vector.  The device driver is responsible for calling the
- * request_irq() to register the MSI vector with an interrupt handler, which
- * is done in this function.
- *
- * Return codes
- *      0 - successful
- *      other values - error
- */
-static int
-lpfc_sli_enable_msi(struct lpfc_hba *phba)
-{
-        int rc;
-
-        rc = pci_enable_msi(phba->pcidev);
-        if (!rc)
-                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                                "0462 PCI enable MSI mode success.\n");
-        else {
-                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                                "0471 PCI enable MSI mode failed (%d)\n", rc);
-                return rc;
-        }
-
-        rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
-                         IRQF_SHARED, LPFC_DRIVER_NAME, phba);
-        if (rc) {
-                pci_disable_msi(phba->pcidev);
-                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-                                "0478 MSI request_irq failed (%d)\n", rc);
-        }
-        return rc;
-}
-
-/**
- * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to disable the MSI interrupt mode to device with
- * SLI-3 interface spec.  The driver calls free_irq() on the MSI vector it
- * has done request_irq() on before calling pci_disable_msi().  Failure to
- * do so results in a BUG_ON() and the device will be left with MSI enabled,
- * leaking its vector.
- */
-static void
-lpfc_sli_disable_msi(struct lpfc_hba *phba)
-{
-        free_irq(phba->pcidev->irq, phba);
-        pci_disable_msi(phba->pcidev);
-        return;
-}
-
-/**
- * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to enable device interrupt and associate driver's
- * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
- * spec.  Depending on the interrupt mode configured in the driver, the driver
- * will try to fall back from the configured interrupt mode to an interrupt
- * mode which is supported by the platform, kernel, and device, in the order
- * of:
- *      MSI-X -> MSI -> IRQ.
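- *
- * In outline (illustrative only; the CONFIG_PORT step and error
- * handling are elided):
- *
- *      if (cfg_mode == 2 && lpfc_sli_enable_msix(phba) == 0)
- *              intr_mode = 2;
- *      else if (cfg_mode >= 1 && lpfc_sli_enable_msi(phba) == 0)
- *              intr_mode = 1;
- *      else if (request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
- *                           IRQF_SHARED, LPFC_DRIVER_NAME, phba) == 0)
- *              intr_mode = 0;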
- *
- * Return codes
- *      0 - successful
- *      other values - error
- **/
-static uint32_t
-lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
-{
-        uint32_t intr_mode = LPFC_INTR_ERROR;
-        int retval;
-
-        if (cfg_mode == 2) {
-                /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
-                retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
-                if (!retval) {
-                        /* Now, try to enable MSI-X interrupt mode */
-                        retval = lpfc_sli_enable_msix(phba);
-                        if (!retval) {
-                                /* Indicate initialization to MSI-X mode */
-                                phba->intr_type = MSIX;
-                                intr_mode = 2;
-                        }
-                }
-        }
-
-        /* Fallback to MSI if MSI-X initialization failed */
-        if (cfg_mode >= 1 && phba->intr_type == NONE) {
-                retval = lpfc_sli_enable_msi(phba);
-                if (!retval) {
-                        /* Indicate initialization to MSI mode */
-                        phba->intr_type = MSI;
-                        intr_mode = 1;
-                }
-        }
-
-        /* Fallback to INTx if both MSI-X/MSI initialization failed */
-        if (phba->intr_type == NONE) {
-                retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
-                                     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
-                if (!retval) {
-                        /* Indicate initialization to INTx mode */
-                        phba->intr_type = INTx;
-                        intr_mode = 0;
-                }
-        }
-        return intr_mode;
-}
-
-/**
- * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to disable device interrupt and disassociate the
- * driver's interrupt handler(s) from interrupt vector(s) to device with
- * SLI-3 interface spec.  Depending on the interrupt mode, the driver will
- * release the interrupt vector(s) for the message signaled interrupt.
- **/
-static void
-lpfc_sli_disable_intr(struct lpfc_hba *phba)
-{
-        /* Disable the currently initialized interrupt mode */
-        if (phba->intr_type == MSIX)
-                lpfc_sli_disable_msix(phba);
-        else if (phba->intr_type == MSI)
-                lpfc_sli_disable_msi(phba);
-        else if (phba->intr_type == INTx)
-                free_irq(phba->pcidev->irq, phba);
-
-        /* Reset interrupt management states */
-        phba->intr_type = NONE;
-        phba->sli.slistat.sli_intr = 0;
-
-        return;
-}
-
-/**
- * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to enable the MSI-X interrupt vectors to device
- * with SLI-4 interface spec.  The kernel function pci_enable_msix() is called
- * to enable the MSI-X vectors.  Note that pci_enable_msix(), once invoked,
- * enables either all or nothing, depending on the current availability of
- * PCI vector resources.  The device driver is responsible for calling the
- * individual request_irq() to register each MSI-X vector with an interrupt
- * handler, which is done in this function.  Note that later when the device
- * is unloading, the driver should always call free_irq() on all MSI-X vectors
- * it has done request_irq() on before calling pci_disable_msix().  Failure
- * to do so results in a BUG_ON() and the device will be left with MSI-X
- * enabled, leaking its vectors.
- *
- * Return codes
- *      0 - successful
- *      other values - error
- **/
-static int
-lpfc_sli4_enable_msix(struct lpfc_hba *phba)
-{
-        int rc, index;
-
-        /* Set up MSI-X multi-message vectors */
-        for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
-                phba->sli4_hba.msix_entries[index].entry = index;
-
-        /* Configure MSI-X capability structure */
-        rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
-                             phba->sli4_hba.cfg_eqn);
-        if (rc) {
-                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                                "0484 PCI enable MSI-X failed (%d)\n", rc);
-                goto msi_fail_out;
-        }
-        /* Log MSI-X vector assignment */
-        for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
-                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                                "0489 MSI-X entry[%d]: vector=x%x "
-                                "message=%d\n", index,
-                                phba->sli4_hba.msix_entries[index].vector,
-                                phba->sli4_hba.msix_entries[index].entry);
-        /*
-         * Assign MSI-X vectors to interrupt handlers
-         */
-
-        /* The first vector must be associated with the slow-path handler
-         * for the MQ */
-        rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
-                         &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
-                         LPFC_SP_DRIVER_HANDLER_NAME, phba);
-        if (rc) {
-                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-                                "0485 MSI-X slow-path request_irq failed "
-                                "(%d)\n", rc);
-                goto msi_fail_out;
-        }
-
-        /* The rest of the vector(s) are associated to fast-path handler(s) */
-        for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
-                phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
-                phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
-                rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
-                                 &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
-                                 LPFC_FP_DRIVER_HANDLER_NAME,
-                                 &phba->sli4_hba.fcp_eq_hdl[index - 1]);
-                if (rc) {
-                        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-                                        "0486 MSI-X fast-path (%d) "
-                                        "request_irq failed (%d)\n", index, rc);
-                        goto cfg_fail_out;
-                }
-        }
-
-        return rc;
-
-cfg_fail_out:
-        /* free the irq already requested */
-        for (--index; index >= 1; index--)
-                free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
-                         &phba->sli4_hba.fcp_eq_hdl[index - 1]);
-
-        /* free the irq already requested */
-        free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
-
-msi_fail_out:
-        /* Unconfigure MSI-X capability structure */
-        pci_disable_msix(phba->pcidev);
-        return rc;
-}
-
-/**
- * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to release the MSI-X vectors and then disable the
- * MSI-X interrupt mode to device with SLI-4 interface spec.
- **/
-static void
-lpfc_sli4_disable_msix(struct lpfc_hba *phba)
-{
-        int index;
-
-        /* Free up MSI-X multi-message vectors */
-        free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
-
-        for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
-                free_irq(phba->sli4_hba.msix_entries[index].vector,
-                         &phba->sli4_hba.fcp_eq_hdl[index - 1]);
-        /* Disable MSI-X */
-        pci_disable_msix(phba->pcidev);
-
-        return;
-}
-
-/**
- * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to enable the MSI interrupt mode to device with
- * SLI-4 interface spec.  The kernel function pci_enable_msi() is called
- * to enable the MSI vector.  The device driver is responsible for calling
- * the request_irq() to register the MSI vector with an interrupt handler,
- * which is done in this function.
- *
- * Return codes
- *      0 - successful
- *      other values - error
- **/
-static int
-lpfc_sli4_enable_msi(struct lpfc_hba *phba)
-{
-        int rc, index;
-
-        rc = pci_enable_msi(phba->pcidev);
-        if (!rc)
-                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                                "0487 PCI enable MSI mode success.\n");
-        else {
-                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                                "0488 PCI enable MSI mode failed (%d)\n", rc);
-                return rc;
-        }
-
-        rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
-                         IRQF_SHARED, LPFC_DRIVER_NAME, phba);
-        if (rc) {
-                pci_disable_msi(phba->pcidev);
-                lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-                                "0490 MSI request_irq failed (%d)\n", rc);
-        }
-
-        for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
-                phba->sli4_hba.fcp_eq_hdl[index].idx = index;
-                phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
-        }
-
-        return rc;
-}
-
-/**
- * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to disable the MSI interrupt mode to device with
- * SLI-4 interface spec.  The driver calls free_irq() on the MSI vector it
- * has done request_irq() on before calling pci_disable_msi().  Failure to
- * do so results in a BUG_ON() and the device will be left with MSI enabled,
- * leaking its vector.
- **/
-static void
-lpfc_sli4_disable_msi(struct lpfc_hba *phba)
-{
-        free_irq(phba->pcidev->irq, phba);
-        pci_disable_msi(phba->pcidev);
-        return;
-}
-
-/**
- * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to enable device interrupt and associate driver's
- * interrupt handler(s) to interrupt vector(s) to device with SLI-4
- * interface spec.  Depending on the interrupt mode configured in the driver,
- * the driver will try to fall back from the configured interrupt mode to an
- * interrupt mode which is supported by the platform, kernel, and device, in
- * the order of:
- *      MSI-X -> MSI -> IRQ.
- *
- * Return codes
- *      0 - successful
- *      other values - error
- **/
-static uint32_t
-lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
-{
-        uint32_t intr_mode = LPFC_INTR_ERROR;
-        int retval, index;
-
-        if (cfg_mode == 2) {
-                /* Preparation before conf_msi mbox cmd */
-                retval = 0;
-                if (!retval) {
-                        /* Now, try to enable MSI-X interrupt mode */
-                        retval = lpfc_sli4_enable_msix(phba);
-                        if (!retval) {
-                                /* Indicate initialization to MSI-X mode */
-                                phba->intr_type = MSIX;
-                                intr_mode = 2;
-                        }
-                }
-        }
-
-        /* Fallback to MSI if MSI-X initialization failed */
-        if (cfg_mode >= 1 && phba->intr_type == NONE) {
-                retval = lpfc_sli4_enable_msi(phba);
-                if (!retval) {
-                        /* Indicate initialization to MSI mode */
-                        phba->intr_type = MSI;
-                        intr_mode = 1;
-                }
-        }
-
-        /* Fallback to INTx if both MSI-X/MSI initialization failed */
-        if (phba->intr_type == NONE) {
-                retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
-                                     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
-                if (!retval) {
-                        /* Indicate initialization to INTx mode */
-                        phba->intr_type = INTx;
-                        intr_mode = 0;
-                        for (index = 0; index < phba->cfg_fcp_eq_count;
-                             index++) {
-                                phba->sli4_hba.fcp_eq_hdl[index].idx = index;
-                                phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
-                        }
-                }
-        }
-        return intr_mode;
-}
-
-/**
- * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
- * @phba: pointer to lpfc hba data structure.
- * - * This routine is invoked to disable device interrupt and disassociate - * the driver's interrupt handler(s) from interrupt vector(s) to device - * with SLI-4 interface spec. Depending on the interrupt mode, the driver - * will release the interrupt vector(s) for the message signaled interrupt. - **/ -static void -lpfc_sli4_disable_intr(struct lpfc_hba *phba) -{ - /* Disable the currently initialized interrupt mode */ - if (phba->intr_type == MSIX) - lpfc_sli4_disable_msix(phba); - else if (phba->intr_type == MSI) - lpfc_sli4_disable_msi(phba); - else if (phba->intr_type == INTx) - free_irq(phba->pcidev->irq, phba); - - /* Reset interrupt management states */ - phba->intr_type = NONE; - phba->sli.slistat.sli_intr = 0; - - return; -} - -/** - * lpfc_unset_hba - Unset SLI3 hba device initialization - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to unset the HBA device initialization steps to - * a device with SLI-3 interface spec. - **/ -static void -lpfc_unset_hba(struct lpfc_hba *phba) -{ - struct lpfc_vport *vport = phba->pport; - struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - - spin_lock_irq(shost->host_lock); - vport->load_flag |= FC_UNLOADING; - spin_unlock_irq(shost->host_lock); - - lpfc_stop_hba_timers(phba); - - phba->pport->work_port_events = 0; - - lpfc_sli_hba_down(phba); - - lpfc_sli_brdrestart(phba); - - lpfc_sli_disable_intr(phba); - - return; -} - -/** - * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization. - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to unset the HBA device initialization steps to - * a device with SLI-4 interface spec. - **/ -static void -lpfc_sli4_unset_hba(struct lpfc_hba *phba) -{ - struct lpfc_vport *vport = phba->pport; - struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - - spin_lock_irq(shost->host_lock); - vport->load_flag |= FC_UNLOADING; - spin_unlock_irq(shost->host_lock); - - phba->pport->work_port_events = 0; - - lpfc_sli4_hba_down(phba); - - lpfc_sli4_disable_intr(phba); - - return; -} - -/** - * lpfc_sli4_hba_unset - Unset the fcoe hba - * @phba: Pointer to HBA context object. - * - * This function is called in the SLI4 code path to reset the HBA's FCoE - * function. The caller is not required to hold any lock. This routine - * issues PCI function reset mailbox command to reset the FCoE function. - * At the end of the function, it calls lpfc_hba_down_post function to - * free any pending commands. - **/ -static void -lpfc_sli4_hba_unset(struct lpfc_hba *phba) -{ - int wait_cnt = 0; - LPFC_MBOXQ_t *mboxq; - - lpfc_stop_hba_timers(phba); - phba->sli4_hba.intr_enable = 0; - - /* - * Gracefully wait out the potential current outstanding asynchronous - * mailbox command. 
- */ - - /* First, block any pending async mailbox command from being posted */ - spin_lock_irq(&phba->hbalock); - phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; - spin_unlock_irq(&phba->hbalock); - /* Now, try to wait it out if we can */ - while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { - msleep(10); - if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT) - break; - } - /* Forcefully release the outstanding mailbox command if timed out */ - if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) { - spin_lock_irq(&phba->hbalock); - mboxq = phba->sli.mbox_active; - mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; - __lpfc_mbox_cmpl_put(phba, mboxq); - phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - phba->sli.mbox_active = NULL; - spin_unlock_irq(&phba->hbalock); - } - - /* Tear down the queues in the HBA */ - lpfc_sli4_queue_unset(phba); - - /* Disable PCI subsystem interrupt */ - lpfc_sli4_disable_intr(phba); - - /* Stopping the kthread will trigger work_done one more time */ - kthread_stop(phba->worker_thread); - - /* Stop the SLI4 device port */ - phba->pport->work_port_events = 0; -} - -/** - * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem. - * @pdev: pointer to PCI device - * @pid: pointer to PCI device identifier - * - * This routine is to be called to attach a device with SLI-3 interface spec - * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is - * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific - * information of the device and driver to see if the driver can - * support this kind of device. If the match is successful, the driver core - * invokes this routine. If this routine determines it can claim the HBA, it - * does all the initialization that it needs to do to handle the HBA properly.
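The teardown in lpfc_sli4_hba_unset() above first blocks new asynchronous mailbox commands, then gives any in-flight command a bounded window to finish before forcing its completion. Stripped of the lpfc flags and locking, the bounded wait is just this; the poll interval mirrors the hunk, the retry cap is an assumption:

#include <linux/delay.h>

/* Poll a busy flag every 10ms, up to max_tries times; returns true if the
 * flag cleared in time, false if the caller must force-complete the command. */
static bool demo_wait_idle(volatile bool *busy, int max_tries)
{
	int tries = 0;

	while (*busy) {
		msleep(10);
		if (++tries > max_tries)
			return false;
	}
	return true;
}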
- * - * Return code - * 0 - driver can claim the device - * negative value - driver can not claim the device - **/ -static int __devinit -lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) -{ - struct lpfc_hba *phba; - struct lpfc_vport *vport = NULL; - int error; - uint32_t cfg_mode, intr_mode; - - /* Allocate memory for HBA structure */ - phba = lpfc_hba_alloc(pdev); - if (!phba) - return -ENOMEM; - - /* Perform generic PCI device enabling operation */ - error = lpfc_enable_pci_dev(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1401 Failed to enable pci device.\n"); - goto out_free_phba; - } - - /* Set up SLI API function jump table for PCI-device group-0 HBAs */ - error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); - if (error) - goto out_disable_pci_dev; - - /* Set up SLI-3 specific device PCI memory space */ - error = lpfc_sli_pci_mem_setup(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1402 Failed to set up pci memory space.\n"); - goto out_disable_pci_dev; - } - - /* Set up phase-1 common device driver resources */ - error = lpfc_setup_driver_resource_phase1(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1403 Failed to set up driver resource.\n"); - goto out_unset_pci_mem_s3; - } - - /* Set up SLI-3 specific device driver resources */ - error = lpfc_sli_driver_resource_setup(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1404 Failed to set up driver resource.\n"); - goto out_unset_pci_mem_s3; - } - - /* Initialize and populate the iocb list per host */ - error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1405 Failed to initialize iocb list.\n"); - goto out_unset_driver_resource_s3; - } - - /* Set up common device driver resources */ - error = lpfc_setup_driver_resource_phase2(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1406 Failed to set up driver resource.\n"); - goto out_free_iocb_list; - } - - /* Create SCSI host to the physical port */ - error = lpfc_create_shost(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1407 Failed to create scsi host.\n"); - goto out_unset_driver_resource; - } - - /* Configure sysfs attributes */ - vport = phba->pport; - error = lpfc_alloc_sysfs_attr(vport); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1476 Failed to allocate sysfs attr\n"); - goto out_destroy_shost; - } - - /* Now, trying to enable interrupt and bring up the device */ - cfg_mode = phba->cfg_use_msi; - while (true) { - /* Put device to a known state before enabling interrupt */ - lpfc_stop_port(phba); - /* Configure and enable interrupt */ - intr_mode = lpfc_sli_enable_intr(phba, cfg_mode); - if (intr_mode == LPFC_INTR_ERROR) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0431 Failed to enable interrupt.\n"); - error = -ENODEV; - goto out_free_sysfs_attr; - } - /* SLI-3 HBA setup */ - if (lpfc_sli_hba_setup(phba)) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1477 Failed to set up hba\n"); - error = -ENODEV; - goto out_remove_device; - } - - /* Wait 50ms for the interrupts of previous mailbox commands */ - msleep(50); - /* Check active interrupts on message signaled interrupts */ - if (intr_mode == 0 || - phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { - /* Log the current active interrupt mode */ - phba->intr_mode = intr_mode; - lpfc_log_intr_mode(phba, intr_mode); - break; - } else { - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0447 
Configure interrupt mode (%d) " - "failed active interrupt test.\n", - intr_mode); - /* Disable the current interrupt mode */ - lpfc_sli_disable_intr(phba); - /* Try next level of interrupt mode */ - cfg_mode = --intr_mode; - } - } - - /* Perform post initialization setup */ - lpfc_post_init_setup(phba); - - /* Check if there are static vports to be created. */ - lpfc_create_static_vport(phba); - - return 0; - -out_remove_device: - lpfc_unset_hba(phba); -out_free_sysfs_attr: - lpfc_free_sysfs_attr(vport); -out_destroy_shost: - lpfc_destroy_shost(phba); -out_unset_driver_resource: - lpfc_unset_driver_resource_phase2(phba); -out_free_iocb_list: - lpfc_free_iocb_list(phba); -out_unset_driver_resource_s3: - lpfc_sli_driver_resource_unset(phba); -out_unset_pci_mem_s3: - lpfc_sli_pci_mem_unset(phba); -out_disable_pci_dev: - lpfc_disable_pci_dev(phba); -out_free_phba: - lpfc_hba_free(phba); - return error; -} - -/** - * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem. - * @pdev: pointer to PCI device - * - * This routine is to be called to detach a device with SLI-3 interface - * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is - * removed from PCI bus, it performs all the necessary cleanup for the HBA - * device to be removed from the PCI subsystem properly. - **/ -static void __devexit -lpfc_pci_remove_one_s3(struct pci_dev *pdev) -{ - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; - struct lpfc_vport **vports; - struct lpfc_hba *phba = vport->phba; - int i; - int bars = pci_select_bars(pdev, IORESOURCE_MEM); - - spin_lock_irq(&phba->hbalock); - vport->load_flag |= FC_UNLOADING; - spin_unlock_irq(&phba->hbalock); - - lpfc_free_sysfs_attr(vport); - - /* Release all the vports against this physical port */ - vports = lpfc_create_vport_work_array(phba); - if (vports != NULL) - for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) - fc_vport_terminate(vports[i]->fc_vport); - lpfc_destroy_vport_work_array(phba, vports); - - /* Remove FC host and then SCSI host with the physical port */ - fc_remove_host(shost); - scsi_remove_host(shost); - lpfc_cleanup(vport); - - /* - * Bring down the SLI Layer. This step disables all interrupts, - * clears the rings, discards all mailbox commands, and resets - * the HBA. - */ - - /* HBA interrupt will be disabled after this call */ - lpfc_sli_hba_down(phba); - /* Stopping the kthread will trigger work_done one more time */ - kthread_stop(phba->worker_thread); - /* Final cleanup of txcmplq and reset the HBA */ - lpfc_sli_brdrestart(phba); - - lpfc_stop_hba_timers(phba); - spin_lock_irq(&phba->hbalock); - list_del_init(&vport->listentry); - spin_unlock_irq(&phba->hbalock); - - lpfc_debugfs_terminate(vport); - - /* Disable interrupt */ - lpfc_sli_disable_intr(phba); - - pci_set_drvdata(pdev, NULL); - scsi_host_put(shost); - - /* - * Call scsi_free before mem_free since scsi bufs are released to their - * corresponding pools here.
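Both probe routines, together with the error labels visible above, use the kernel's goto-unwind idiom: acquire resources in order, and on failure jump to the label that releases exactly what has been acquired so far, in reverse order. A skeleton of the idiom with two real PCI steps and one hypothetical hardware-setup step:

#include <linux/pci.h>

static int demo_setup_hw(struct pci_dev *pdev)	/* hypothetical driver step */
{
	return 0;
}

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;		/* nothing acquired yet, plain return */

	err = pci_request_regions(pdev, "demo");
	if (err)
		goto out_disable;

	err = demo_setup_hw(pdev);
	if (err)
		goto out_regions;

	return 0;			/* the driver claims the device */

out_regions:
	pci_release_regions(pdev);
out_disable:
	pci_disable_device(pdev);
	return err;
}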
- */ - lpfc_scsi_free(phba); - lpfc_mem_free_all(phba); - - dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), - phba->hbqslimp.virt, phba->hbqslimp.phys); - - /* Free resources associated with SLI2 interface */ - dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, - phba->slim2p.virt, phba->slim2p.phys); - - /* unmap adapter SLIM and Control Registers */ - iounmap(phba->ctrl_regs_memmap_p); - iounmap(phba->slim_memmap_p); - - lpfc_hba_free(phba); - - pci_release_selected_regions(pdev, bars); - pci_disable_device(pdev); -} - -/** - * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt - * @pdev: pointer to PCI device - * @msg: power management message - * - * This routine is to be called from the kernel's PCI subsystem to support - * system Power Management (PM) to device with SLI-3 interface spec. When - * PM invokes this method, it quiesces the device by stopping the driver's - * worker thread for the device, turning off device's interrupt and DMA, - * and bring the device offline. Note that as the driver implements the - * minimum PM requirements to a power-aware driver's PM support for the - * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) - * to the suspend() method call will be treated as SUSPEND and the driver will - * fully reinitialize its device during resume() method call, the driver will - * set device to PCI_D3hot state in PCI config space instead of setting it - * according to the @msg provided by the PM. - * - * Return code - * 0 - driver suspended the device - * Error otherwise - **/ -static int -lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg) -{ - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0473 PCI device Power Management suspend.\n"); - - /* Bring down the device */ - lpfc_offline_prep(phba); - lpfc_offline(phba); - kthread_stop(phba->worker_thread); - - /* Disable interrupt from device */ - lpfc_sli_disable_intr(phba); - - /* Save device state to PCI config space */ - pci_save_state(pdev); - pci_set_power_state(pdev, PCI_D3hot); - - return 0; -} - -/** - * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt - * @pdev: pointer to PCI device - * - * This routine is to be called from the kernel's PCI subsystem to support - * system Power Management (PM) to device with SLI-3 interface spec. When PM - * invokes this method, it restores the device's PCI config space state and - * fully reinitializes the device and brings it online. Note that as the - * driver implements the minimum PM requirements to a power-aware driver's - * PM for suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, - * FREEZE) to the suspend() method call will be treated as SUSPEND and the - * driver will fully reinitialize its device during resume() method call, - * the device will be set to PCI_D0 directly in PCI config space before - * restoring the state. 
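The suspend and resume routines above implement the legacy (pre-dev_pm_ops) PCI PM callbacks: quiesce the device, save config space, drop to D3hot; on resume, return to D0, restore config space, and rebuild. The bare bones of that contract, with the driver-specific quiesce and revive steps left as comments:

#include <linux/pci.h>

static int demo_suspend(struct pci_dev *pdev, pm_message_t msg)
{
	/* stop workers, disable interrupts, bring the port offline ... */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);	/* always D3hot, whatever @msg says */
	return 0;
}

static int demo_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	if (pdev->is_busmaster)
		pci_set_master(pdev);	/* re-arm bus mastering after restore */
	/* restart workers, re-enable interrupts, bring the port online ... */
	return 0;
}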
- * - * Return code - * 0 - driver suspended the device - * Error otherwise - **/ -static int -lpfc_pci_resume_one_s3(struct pci_dev *pdev) -{ - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - uint32_t intr_mode; - int error; - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0452 PCI device Power Management resume.\n"); - - /* Restore device state from PCI config space */ - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - if (pdev->is_busmaster) - pci_set_master(pdev); - - /* Startup the kernel thread for this host adapter. */ - phba->worker_thread = kthread_run(lpfc_do_work, phba, - "lpfc_worker_%d", phba->brd_no); - if (IS_ERR(phba->worker_thread)) { - error = PTR_ERR(phba->worker_thread); - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0434 PM resume failed to start worker " - "thread: error=x%x.\n", error); - return error; - } - - /* Configure and enable interrupt */ - intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); - if (intr_mode == LPFC_INTR_ERROR) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0430 PM resume Failed to enable interrupt\n"); - return -EIO; - } else - phba->intr_mode = intr_mode; - - /* Restart HBA and bring it online */ - lpfc_sli_brdrestart(phba); - lpfc_online(phba); - - /* Log the current active interrupt mode */ - lpfc_log_intr_mode(phba, phba->intr_mode); - - return 0; -} - -/** - * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error - * @pdev: pointer to PCI device. - * @state: the current PCI connection state. - * - * This routine is called from the PCI subsystem for I/O error handling to - * device with SLI-3 interface spec. This function is called by the PCI - * subsystem after a PCI bus error affecting this device has been detected. - * When this function is invoked, it will need to stop all the I/Os and - * interrupt(s) to the device. Once that is done, it will return - * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery - * as desired. - * - * Return codes - * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery - * PCI_ERS_RESULT_DISCONNECT - device could not be recovered - **/ -static pci_ers_result_t -lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state) -{ - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_sli_ring *pring; - - if (state == pci_channel_io_perm_failure) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0472 PCI channel I/O permanent failure\n"); - /* Block all SCSI devices' I/Os on the host */ - lpfc_scsi_dev_block(phba); - /* Clean up all driver's outstanding SCSI I/Os */ - lpfc_sli_flush_fcp_rings(phba); - return PCI_ERS_RESULT_DISCONNECT; - } - - pci_disable_device(pdev); - /* - * There may be I/Os dropped by the firmware. - * Error iocb (I/O) on txcmplq and let the SCSI layer - * retry it after re-establishing link. - */ - pring = &psli->ring[psli->fcp_ring]; - lpfc_sli_abort_iocb_ring(phba, pring); - - /* Disable interrupt */ - lpfc_sli_disable_intr(phba); - - /* Request a slot reset. */ - return PCI_ERS_RESULT_NEED_RESET; -} - -/** - * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch. - * @pdev: pointer to PCI device. - * - * This routine is called from the PCI subsystem for error handling to - * device with SLI-3 interface spec. 
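The error_detected/slot_reset/io_resume trio shown here plugs into the PCI core's recovery walk through a struct pci_error_handlers. A minimal wiring of the three stages, with the bodies reduced to the decisions the hunks above make:

#include <linux/pci.h>

static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;	/* no recovery possible */
	/* stop I/O and disable interrupts here */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t demo_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device_mem(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_restore_state(pdev);
	/* re-enable interrupts and reinitialize the HBA, still offline */
	return PCI_ERS_RESULT_RECOVERED;
}

static void demo_io_resume(struct pci_dev *pdev)
{
	/* bring the device back online; traffic may flow again */
}

static struct pci_error_handlers demo_err_handler = {
	.error_detected	= demo_error_detected,
	.slot_reset	= demo_slot_reset,
	.resume		= demo_io_resume,
};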
This is called after PCI bus has been - * reset to restart the PCI card from scratch, as if from a cold-boot. - * During the PCI subsystem error recovery, after driver returns - * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error - * recovery and then call this routine before calling the .resume method - * to recover the device. This function will initialize the HBA device, - * enable the interrupt, but it will just put the HBA to offline state - * without passing any I/O traffic. - * - * Return codes - * PCI_ERS_RESULT_RECOVERED - the device has been recovered - * PCI_ERS_RESULT_DISCONNECT - device could not be recovered - */ -static pci_ers_result_t -lpfc_io_slot_reset_s3(struct pci_dev *pdev) -{ - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - struct lpfc_sli *psli = &phba->sli; - uint32_t intr_mode; - - dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); - if (pci_enable_device_mem(pdev)) { - printk(KERN_ERR "lpfc: Cannot re-enable " - "PCI device after reset.\n"); - return PCI_ERS_RESULT_DISCONNECT; - } - - pci_restore_state(pdev); - if (pdev->is_busmaster) - pci_set_master(pdev); - - spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI_ACTIVE; - spin_unlock_irq(&phba->hbalock); - - /* Configure and enable interrupt */ - intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode); - if (intr_mode == LPFC_INTR_ERROR) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0427 Cannot re-enable interrupt after " - "slot reset.\n"); - return PCI_ERS_RESULT_DISCONNECT; - } else - phba->intr_mode = intr_mode; - - /* Take device offline; this will perform cleanup */ - lpfc_offline(phba); - lpfc_sli_brdrestart(phba); - - /* Log the current active interrupt mode */ - lpfc_log_intr_mode(phba, phba->intr_mode); - - return PCI_ERS_RESULT_RECOVERED; -} - -/** - * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device. - * @pdev: pointer to PCI device - * - * This routine is called from the PCI subsystem for error handling to device - * with SLI-3 interface spec. It is called when kernel error recovery tells - * the lpfc driver that it is ok to resume normal PCI operation after PCI bus - * error recovery. After this call, traffic can start to flow from this device - * again. - */ -static void -lpfc_io_resume_s3(struct pci_dev *pdev) -{ - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - - lpfc_online(phba); -} - -/** - * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve - * @phba: pointer to lpfc hba data structure. - * - * returns the number of ELS/CT IOCBs to reserve - **/ -int -lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba) -{ - int max_xri = phba->sli4_hba.max_cfg_param.max_xri; - - if (max_xri <= 100) - return 4; - else if (max_xri <= 256) - return 8; - else if (max_xri <= 512) - return 16; - else if (max_xri <= 1024) - return 32; - else - return 48; -} - -/** - * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys - * @pdev: pointer to PCI device - * @pid: pointer to PCI device identifier - * - * This routine is called from the kernel's PCI subsystem to device with - * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is - * presented on PCI bus, the kernel PCI subsystem looks at PCI device-specific - * information of the device and driver to see if the driver state that it - * can support this kind of device. 
If the match is successful, the driver - * core invokes this routine. If this routine determines it can claim the HBA, - * it does all the initialization that it needs to do to handle the HBA - * properly. - * - * Return code - * 0 - driver can claim the device - * negative value - driver can not claim the device - **/ -static int __devinit -lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) -{ - struct lpfc_hba *phba; - struct lpfc_vport *vport = NULL; - int error; - uint32_t cfg_mode, intr_mode; - int mcnt; - - /* Allocate memory for HBA structure */ - phba = lpfc_hba_alloc(pdev); - if (!phba) - return -ENOMEM; - - /* Perform generic PCI device enabling operation */ - error = lpfc_enable_pci_dev(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1409 Failed to enable pci device.\n"); - goto out_free_phba; - } - - /* Set up SLI API function jump table for PCI-device group-1 HBAs */ - error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); - if (error) - goto out_disable_pci_dev; - - /* Set up SLI-4 specific device PCI memory space */ - error = lpfc_sli4_pci_mem_setup(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1410 Failed to set up pci memory space.\n"); - goto out_disable_pci_dev; - } - - /* Set up phase-1 common device driver resources */ - error = lpfc_setup_driver_resource_phase1(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1411 Failed to set up driver resource.\n"); - goto out_unset_pci_mem_s4; - } - - /* Set up SLI-4 Specific device driver resources */ - error = lpfc_sli4_driver_resource_setup(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1412 Failed to set up driver resource.\n"); - goto out_unset_pci_mem_s4; - } - - /* Initialize and populate the iocb list per host */ - error = lpfc_init_iocb_list(phba, - phba->sli4_hba.max_cfg_param.max_xri); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1413 Failed to initialize iocb list.\n"); - goto out_unset_driver_resource_s4; - } + shost = lpfc_shost_from_vport(vport); + phba->pport = vport; + lpfc_debugfs_initialize(vport); - /* Set up common device driver resources */ - error = lpfc_setup_driver_resource_phase2(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1414 Failed to set up driver resource.\n"); - goto out_free_iocb_list; - } + pci_set_drvdata(pdev, shost); - /* Create SCSI host to the physical port */ - error = lpfc_create_shost(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1415 Failed to create scsi host.\n"); - goto out_unset_driver_resource; - } + phba->MBslimaddr = phba->slim_memmap_p; + phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET; + phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET; + phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET; + phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET; /* Configure sysfs attributes */ - vport = phba->pport; - error = lpfc_alloc_sysfs_attr(vport); - if (error) { + if (lpfc_alloc_sysfs_attr(vport)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1416 Failed to allocate sysfs attr\n"); - goto out_destroy_shost; + "1476 Failed to allocate sysfs attr\n"); + error = -ENOMEM; + goto out_destroy_port; } - /* Now, trying to enable interrupt and bring up the device */ cfg_mode = phba->cfg_use_msi; while (true) { - /* Put device to a known state before enabling interrupt */ - lpfc_stop_port(phba); /* Configure and enable interrupt */ - intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); + 
intr_mode = lpfc_enable_intr(phba, cfg_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0426 Failed to enable interrupt.\n"); - error = -ENODEV; goto out_free_sysfs_attr; } - /* Set up SLI-4 HBA */ - if (lpfc_sli4_hba_setup(phba)) { + /* HBA SLI setup */ + if (lpfc_sli_hba_setup(phba)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1421 Failed to set up hba\n"); + "1477 Failed to set up hba\n"); error = -ENODEV; - goto out_disable_intr; + goto out_remove_device; } - /* Send NOP mbx cmds for non-INTx mode active interrupt test */ - if (intr_mode != 0) - mcnt = lpfc_sli4_send_nop_mbox_cmds(phba, - LPFC_ACT_INTR_CNT); - - /* Check active interrupts received only for MSI/MSI-X */ - if (intr_mode == 0 || - phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) { + /* Wait 50ms for the interrupts of previous mailbox commands */ + msleep(50); + /* Check active interrupts received */ + if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) { /* Log the current active interrupt mode */ phba->intr_mode = intr_mode; lpfc_log_intr_mode(phba, intr_mode); break; + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0451 Configure interrupt mode (%d) " + "failed active interrupt test.\n", + intr_mode); + if (intr_mode == 0) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0479 Failed to enable " + "interrupt.\n"); + error = -ENODEV; + goto out_remove_device; + } + /* Stop HBA SLI setups */ + lpfc_stop_port(phba); + /* Disable the current interrupt mode */ + lpfc_disable_intr(phba); + /* Try next level of interrupt mode */ + cfg_mode = --intr_mode; } - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0451 Configure interrupt mode (%d) " - "failed active interrupt test.\n", - intr_mode); - /* Unset the previous SLI-4 HBA setup */ - lpfc_sli4_unset_hba(phba); - /* Try next level of interrupt mode */ - cfg_mode = --intr_mode; - } - - /* Perform post initialization setup */ - lpfc_post_init_setup(phba); + /* + * hba setup may have changed the hba_queue_depth so we need to adjust + * the value of can_queue.
+ */ + shost->can_queue = phba->cfg_hba_queue_depth - 10; + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { + + if (lpfc_prot_mask && lpfc_prot_guard) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "1478 Registering BlockGuard with the " + "SCSI layer\n"); + + scsi_host_set_prot(shost, lpfc_prot_mask); + scsi_host_set_guard(shost, lpfc_prot_guard); + } + } + + if (!_dump_buf_data) { + int pagecnt = 10; + while (pagecnt) { + spin_lock_init(&_dump_buf_lock); + _dump_buf_data = + (char *) __get_free_pages(GFP_KERNEL, pagecnt); + if (_dump_buf_data) { + printk(KERN_ERR "BLKGRD allocated %d pages for " + "_dump_buf_data at 0x%p\n", + (1 << pagecnt), _dump_buf_data); + _dump_buf_data_order = pagecnt; + memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT) + << pagecnt)); + break; + } else { + --pagecnt; + } + + } + + if (!_dump_buf_data_order) + printk(KERN_ERR "BLKGRD ERROR unable to allocate " + "memory for hexdump\n"); + + } else { + printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p" + "\n", _dump_buf_data); + } + + + if (!_dump_buf_dif) { + int pagecnt = 10; + while (pagecnt) { + _dump_buf_dif = + (char *) __get_free_pages(GFP_KERNEL, pagecnt); + if (_dump_buf_dif) { + printk(KERN_ERR "BLKGRD allocated %d pages for " + "_dump_buf_dif at 0x%p\n", + (1 << pagecnt), _dump_buf_dif); + _dump_buf_dif_order = pagecnt; + memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT) + << pagecnt)); + break; + } else { + --pagecnt; + } + + } + + if (!_dump_buf_dif_order) + printk(KERN_ERR "BLKGRD ERROR unable to allocate " + "memory for hexdump\n"); + + } else { + printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n", + _dump_buf_dif); + } + + lpfc_host_attrib_init(shost); + + if (phba->cfg_poll & DISABLE_FCP_RING_INT) { + spin_lock_irq(shost->host_lock); + lpfc_poll_start_timer(phba); + spin_unlock_irq(shost->host_lock); + } + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0428 Perform SCSI scan\n"); + /* Send board arrival event to upper layer */ + adapter_event.event_type = FC_REG_ADAPTER_EVENT; + adapter_event.subcategory = LPFC_EVENT_ARRIVAL; + fc_host_post_vendor_event(shost, fc_get_event_number(), + sizeof(adapter_event), + (char *) &adapter_event, + LPFC_NL_VENDOR_ID); return 0; -out_disable_intr: - lpfc_sli4_disable_intr(phba); +out_remove_device: + spin_lock_irq(shost->host_lock); + vport->load_flag |= FC_UNLOADING; + spin_unlock_irq(shost->host_lock); + lpfc_stop_phba_timers(phba); + phba->pport->work_port_events = 0; + lpfc_disable_intr(phba); + lpfc_sli_hba_down(phba); + lpfc_sli_brdrestart(phba); out_free_sysfs_attr: lpfc_free_sysfs_attr(vport); -out_destroy_shost: - lpfc_destroy_shost(phba); -out_unset_driver_resource: - lpfc_unset_driver_resource_phase2(phba); -out_free_iocb_list: - lpfc_free_iocb_list(phba); -out_unset_driver_resource_s4: - lpfc_sli4_driver_resource_unset(phba); -out_unset_pci_mem_s4: - lpfc_sli4_pci_mem_unset(phba); -out_disable_pci_dev: - lpfc_disable_pci_dev(phba); +out_destroy_port: + destroy_port(vport); +out_kthread_stop: + kthread_stop(phba->worker_thread); +out_free_iocbq: + list_for_each_entry_safe(iocbq_entry, iocbq_next, + &phba->lpfc_iocb_list, list) { + kfree(iocbq_entry); + phba->total_iocbq_bufs--; + } + lpfc_mem_free(phba); +out_free_hbqslimp: + dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), + phba->hbqslimp.virt, phba->hbqslimp.phys); +out_free_slim: + dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, + phba->slim2p.virt, phba->slim2p.phys); +out_iounmap: + iounmap(phba->ctrl_regs_memmap_p); +out_iounmap_slim: + iounmap(phba->slim_memmap_p); 
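The _dump_buf_data and _dump_buf_dif hunks above retry __get_free_pages() with a shrinking order until the allocator can satisfy the request; pagecnt there is a page order, so each step halves the buffer. The same descending-order fallback in isolation; the buffer names are hypothetical and __GFP_ZERO stands in for the explicit memset:

#include <linux/gfp.h>

static char *demo_buf;
static int demo_buf_order;

/* Try progressively smaller power-of-two allocations, largest first. */
static void demo_alloc_dump_buf(void)
{
	int order;

	for (order = 10; order > 0; order--) {
		demo_buf = (char *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
						    order);
		if (demo_buf) {
			demo_buf_order = order;	/* remember for free_pages() */
			return;
		}
	}
	demo_buf_order = 0;	/* caller must check demo_buf for failure */
}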
+out_idr_remove: + idr_remove(&lpfc_hba_index, phba->brd_no); out_free_phba: - lpfc_hba_free(phba); + kfree(phba); +out_release_regions: + pci_release_selected_regions(pdev, bars); +out_disable_device: + pci_disable_device(pdev); +out: + pci_set_drvdata(pdev, NULL); + if (shost) + scsi_host_put(shost); return error; } /** - * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem + * lpfc_pci_remove_one - lpfc PCI func to unregister device from PCI subsystem * @pdev: pointer to PCI device * - * This routine is called from the kernel's PCI subsystem to device with - * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is - * removed from PCI bus, it performs all the necessary cleanup for the HBA - * device to be removed from the PCI subsystem properly. + * This routine is to be registered to the kernel's PCI subsystem. When an + * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup + * for the HBA device to be removed from the PCI subsystem properly. **/ static void __devexit -lpfc_pci_remove_one_s4(struct pci_dev *pdev) +lpfc_pci_remove_one(struct pci_dev *pdev) { - struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_vport **vports; - struct lpfc_hba *phba = vport->phba; + struct lpfc_hba *phba = vport->phba; int i; + int bars = pci_select_bars(pdev, IORESOURCE_MEM); - /* Mark the device unloading flag */ spin_lock_irq(&phba->hbalock); vport->load_flag |= FC_UNLOADING; spin_unlock_irq(&phba->hbalock); - /* Free the HBA sysfs attributes */ lpfc_free_sysfs_attr(vport); /* Release all the vports against this physical port */ vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++) + for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++) fc_vport_terminate(vports[i]->fc_vport); lpfc_destroy_vport_work_array(phba, vports); /* Remove FC host and then SCSI host with the physical port */ fc_remove_host(shost); scsi_remove_host(shost); - - /* Perform cleanup on the physical port */ lpfc_cleanup(vport); /* - * Bring down the SLI Layer. This step disables all interrupts, + * Bring down the SLI Layer. This step disables all interrupts, * clears the rings, discards all mailbox commands, and resets - * the HBA FCoE function. + * the HBA. */ - lpfc_debugfs_terminate(vport); - lpfc_sli4_hba_unset(phba); + /* HBA interrupt will be disabled after this call */ + lpfc_sli_hba_down(phba); + /* Stopping the kthread will trigger work_done one more time */ + kthread_stop(phba->worker_thread); + /* Final cleanup of txcmplq and reset the HBA */ + lpfc_sli_brdrestart(phba); + + lpfc_stop_phba_timers(phba); spin_lock_irq(&phba->hbalock); list_del_init(&vport->listentry); spin_unlock_irq(&phba->hbalock); - /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi - * buffers are released to their corresponding pools here. + lpfc_debugfs_terminate(vport); + + /* Disable interrupt */ + lpfc_disable_intr(phba); + + pci_set_drvdata(pdev, NULL); + scsi_host_put(shost); + + /* + * Call scsi_free before mem_free since scsi bufs are released to their + * corresponding pools here.
*/ lpfc_scsi_free(phba); - lpfc_sli4_driver_resource_unset(phba); + lpfc_mem_free(phba); - /* Unmap adapter Control and Doorbell registers */ - lpfc_sli4_pci_mem_unset(phba); + dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(), + phba->hbqslimp.virt, phba->hbqslimp.phys); - /* Release PCI resources and disable device's PCI function */ - scsi_host_put(shost); - lpfc_disable_pci_dev(phba); + /* Free resources associated with SLI2 interface */ + dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE, + phba->slim2p.virt, phba->slim2p.phys); + + /* unmap adapter SLIM and Control Registers */ + iounmap(phba->ctrl_regs_memmap_p); + iounmap(phba->slim_memmap_p); + + idr_remove(&lpfc_hba_index, phba->brd_no); - /* Finally, free the driver's device data structure */ - lpfc_hba_free(phba); + kfree(phba); - return; + pci_release_selected_regions(pdev, bars); + pci_disable_device(pdev); } /** - * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt + * lpfc_pci_suspend_one - lpfc PCI func to suspend device for power management * @pdev: pointer to PCI device * @msg: power management message * - * This routine is called from the kernel's PCI subsystem to support system - * Power Management (PM) to device with SLI-4 interface spec. When PM invokes - * this method, it quiesces the device by stopping the driver's worker - * thread for the device, turning off device's interrupt and DMA, and bring - * the device offline. Note that as the driver implements the minimum PM - * requirements to a power-aware driver's PM support for suspend/resume -- all - * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() - * method call will be treated as SUSPEND and the driver will fully - * reinitialize its device during resume() method call, the driver will set - * device to PCI_D3hot state in PCI config space instead of setting it - * according to the @msg provided by the PM. + * This routine is to be registered to the kernel's PCI subsystem to support + * system Power Management (PM). When PM invokes this method, it quiesces the + * device by stopping the driver's worker thread for the device, turning off + * device's interrupt and DMA, and bring the device offline. Note that as the + * driver implements the minimum PM requirements to a power-aware driver's PM + * support for suspend/resume -- all the possible PM messages (SUSPEND, + * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND + * and the driver will fully reinitialize its device during resume() method + * call, the driver will set device to PCI_D3hot state in PCI config space + * instead of setting it according to the @msg provided by the PM. 
* * Return code - * 0 - driver suspended the device - * Error otherwise + * 0 - driver suspended the device + * Error otherwise **/ static int -lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) +lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0298 PCI device Power Management suspend.\n"); + "0473 PCI device Power Management suspend.\n"); /* Bring down the device */ lpfc_offline_prep(phba); @@ -7476,7 +3194,7 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) kthread_stop(phba->worker_thread); /* Disable interrupt from device */ - lpfc_sli4_disable_intr(phba); + lpfc_disable_intr(phba); /* Save device state to PCI config space */ pci_save_state(pdev); @@ -7486,26 +3204,25 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) } /** - * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt + * lpfc_pci_resume_one - lpfc PCI func to resume device for power management * @pdev: pointer to PCI device * - * This routine is called from the kernel's PCI subsystem to support system - * Power Management (PM) to device with SLI-4 interface spec. When PM invokes - * this method, it restores the device's PCI config space state and fully - * reinitializes the device and brings it online. Note that as the driver - * implements the minimum PM requirements to a power-aware driver's PM for - * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE) - * to the suspend() method call will be treated as SUSPEND and the driver - * will fully reinitialize its device during resume() method call, the device - * will be set to PCI_D0 directly in PCI config space before restoring the - * state. + * This routine is to be registered to the kernel's PCI subsystem to support + * system Power Management (PM). When PM invokes this method, it restores + * the device's PCI config space state and fully reinitializes the device + * and brings it online. Note that as the driver implements the minimum PM + * requirements to a power-aware driver's PM for suspend/resume -- all + * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend() + * method call will be treated as SUSPEND and the driver will fully + * reinitialize its device during resume() method call, the device will be + * set to PCI_D0 directly in PCI config space before restoring the state. * * Return code - * 0 - driver resumed the device - * Error otherwise + * 0 - driver resumed the device + * Error otherwise **/ static int -lpfc_pci_resume_one_s4(struct pci_dev *pdev) +lpfc_pci_resume_one(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; @@ -7513,7 +3230,7 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev) int error; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0292 PCI device Power Management resume.\n"); + "0452 PCI device Power Management resume.\n"); /* Restore device state from PCI config space */ pci_set_power_state(pdev, PCI_D0); @@ -7521,22 +3238,22 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev) if (pdev->is_busmaster) pci_set_master(pdev); - /* Startup the kernel thread for this host adapter. */ + /* Startup the kernel thread for this host adapter. 
*/ phba->worker_thread = kthread_run(lpfc_do_work, phba, "lpfc_worker_%d", phba->brd_no); if (IS_ERR(phba->worker_thread)) { error = PTR_ERR(phba->worker_thread); lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0293 PM resume failed to start worker " + "0434 PM resume failed to start worker " "thread: error=x%x.\n", error); return error; } /* Configure and enable interrupt */ - intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode); + intr_mode = lpfc_enable_intr(phba, phba->intr_mode); if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0294 PM resume Failed to enable interrupt\n"); + "0430 PM resume Failed to enable interrupt\n"); return -EIO; } else phba->intr_mode = intr_mode; @@ -7552,316 +3269,129 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev) } /** - * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device + * lpfc_io_error_detected - Driver method for handling PCI I/O error detected * @pdev: pointer to PCI device. * @state: the current PCI connection state. * - * This routine is called from the PCI subsystem for error handling to device - * with SLI-4 interface spec. This function is called by the PCI subsystem - * after a PCI bus error affecting this device has been detected. When this - * function is invoked, it will need to stop all the I/Os and interrupt(s) - * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET - * for the PCI subsystem to perform proper recovery as desired. - * - * Return codes - * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery - * PCI_ERS_RESULT_DISCONNECT - device could not be recovered - **/ -static pci_ers_result_t -lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state) -{ - return PCI_ERS_RESULT_NEED_RESET; -} - -/** - * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch - * @pdev: pointer to PCI device. - * - * This routine is called from the PCI subsystem for error handling to device - * with SLI-4 interface spec. It is called after PCI bus has been reset to - * restart the PCI card from scratch, as if from a cold-boot. During the - * PCI subsystem error recovery, after the driver returns - * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error - * recovery and then call this routine before calling the .resume method to - * recover the device. This function will initialize the HBA device, enable - * the interrupt, but it will just put the HBA to offline state without - * passing any I/O traffic. + * This routine is registered to the PCI subsystem for error handling. This + * function is called by the PCI subsystem after a PCI bus error affecting + * this device has been detected. When this function is invoked, it will + * need to stop all the I/Os and interrupt(s) to the device. Once that is + * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to + * perform proper recovery as desired. * * Return codes - * PCI_ERS_RESULT_RECOVERED - the device has been recovered - * PCI_ERS_RESULT_DISCONNECT - device could not be recovered - */ -static pci_ers_result_t -lpfc_io_slot_reset_s4(struct pci_dev *pdev) -{ - return PCI_ERS_RESULT_RECOVERED; -} - -/** - * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device - * @pdev: pointer to PCI device - * - * This routine is called from the PCI subsystem for error handling to device - * with SLI-4 interface spec. 
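Resume restarts the per-HBA worker via kthread_run() and must vet the returned task_struct with IS_ERR()/PTR_ERR(), as the hunk above does; every teardown path pairs it with kthread_stop(). The minimal lifecycle, with a placeholder work loop standing in for lpfc_do_work():

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/sched.h>

static struct task_struct *demo_worker;

static int demo_do_work(void *data)
{
	while (!kthread_should_stop())	/* exits once kthread_stop() is called */
		schedule_timeout_interruptible(HZ);	/* stand-in for real event handling */
	return 0;
}

static int demo_start_worker(void *hba, int board_no)
{
	demo_worker = kthread_run(demo_do_work, hba, "demo_worker_%d", board_no);
	if (IS_ERR(demo_worker))
		return PTR_ERR(demo_worker);	/* same check the resume path makes */
	return 0;
}

static void demo_stop_worker(void)
{
	kthread_stop(demo_worker);	/* wakes the thread and waits for its exit */
}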
It is called when kernel error recovery tells - * the lpfc driver that it is ok to resume normal PCI operation after PCI bus - * error recovery. After this call, traffic can start to flow from this device - * again. - **/ -static void -lpfc_io_resume_s4(struct pci_dev *pdev) -{ - return; -} - -/** - * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem - * @pdev: pointer to PCI device - * @pid: pointer to PCI device identifier - * - * This routine is to be registered to the kernel's PCI subsystem. When an - * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks - * at PCI device-specific information of the device and driver to see if the - * driver state that it can support this kind of device. If the match is - * successful, the driver core invokes this routine. This routine dispatches - * the action to the proper SLI-3 or SLI-4 device probing routine, which will - * do all the initialization that it needs to do to handle the HBA device - * properly. - * - * Return code - * 0 - driver can claim the device - * negative value - driver can not claim the device - **/ -static int __devinit -lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid) -{ - int rc; - uint16_t dev_id; - - if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id)) - return -ENODEV; - - switch (dev_id) { - case PCI_DEVICE_ID_TIGERSHARK: - case PCI_DEVICE_ID_TIGERSHARK_S: - rc = lpfc_pci_probe_one_s4(pdev, pid); - break; - default: - rc = lpfc_pci_probe_one_s3(pdev, pid); - break; - } - return rc; -} - -/** - * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem - * @pdev: pointer to PCI device - * - * This routine is to be registered to the kernel's PCI subsystem. When an - * Emulex HBA is removed from PCI bus, the driver core invokes this routine. - * This routine dispatches the action to the proper SLI-3 or SLI-4 device - * remove routine, which will perform all the necessary cleanup for the - * device to be removed from the PCI subsystem properly. + * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered **/ -static void __devexit -lpfc_pci_remove_one(struct pci_dev *pdev) +static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; - switch (phba->pci_dev_grp) { - case LPFC_PCI_DEV_LP: - lpfc_pci_remove_one_s3(pdev); - break; - case LPFC_PCI_DEV_OC: - lpfc_pci_remove_one_s4(pdev); - break; - default: + if (state == pci_channel_io_perm_failure) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1424 Invalid PCI device group: 0x%x\n", - phba->pci_dev_grp); - break; + "0472 PCI channel I/O permanent failure\n"); + /* Block all SCSI devices' I/Os on the host */ + lpfc_scsi_dev_block(phba); + /* Clean up all driver's outstanding SCSI I/Os */ + lpfc_sli_flush_fcp_rings(phba); + return PCI_ERS_RESULT_DISCONNECT; } - return; -} - -/** - * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management - * @pdev: pointer to PCI device - * @msg: power management message - * - * This routine is to be registered to the kernel's PCI subsystem to support - * system Power Management (PM). When PM invokes this method, it dispatches - * the action to the proper SLI-3 or SLI-4 device suspend routine, which will - * suspend the device. 
- * - * Return code - * 0 - driver suspended the device - * Error otherwise - **/ -static int -lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg) -{ - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - int rc = -ENODEV; - switch (phba->pci_dev_grp) { - case LPFC_PCI_DEV_LP: - rc = lpfc_pci_suspend_one_s3(pdev, msg); - break; - case LPFC_PCI_DEV_OC: - rc = lpfc_pci_suspend_one_s4(pdev, msg); - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1425 Invalid PCI device group: 0x%x\n", - phba->pci_dev_grp); - break; - } - return rc; -} + pci_disable_device(pdev); + /* + * There may be I/Os dropped by the firmware. + * Error iocb (I/O) on txcmplq and let the SCSI layer + * retry it after re-establishing link. + */ + pring = &psli->ring[psli->fcp_ring]; + lpfc_sli_abort_iocb_ring(phba, pring); -/** - * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management - * @pdev: pointer to PCI device - * - * This routine is to be registered to the kernel's PCI subsystem to support - * system Power Management (PM). When PM invokes this method, it dispatches - * the action to the proper SLI-3 or SLI-4 device resume routine, which will - * resume the device. - * - * Return code - * 0 - driver suspended the device - * Error otherwise - **/ -static int -lpfc_pci_resume_one(struct pci_dev *pdev) -{ - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - int rc = -ENODEV; + /* Disable interrupt */ + lpfc_disable_intr(phba); - switch (phba->pci_dev_grp) { - case LPFC_PCI_DEV_LP: - rc = lpfc_pci_resume_one_s3(pdev); - break; - case LPFC_PCI_DEV_OC: - rc = lpfc_pci_resume_one_s4(pdev); - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1426 Invalid PCI device group: 0x%x\n", - phba->pci_dev_grp); - break; - } - return rc; + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; } /** - * lpfc_io_error_detected - lpfc method for handling PCI I/O error + * lpfc_io_slot_reset - Restart a PCI device from scratch * @pdev: pointer to PCI device. - * @state: the current PCI connection state. * - * This routine is registered to the PCI subsystem for error handling. This - * function is called by the PCI subsystem after a PCI bus error affecting - * this device has been detected. When this routine is invoked, it dispatches - * the action to the proper SLI-3 or SLI-4 device error detected handling - * routine, which will perform the proper error detected operation. + * This routine is registered to the PCI subsystem for error handling. This is + * called after PCI bus has been reset to restart the PCI card from scratch, + * as if from a cold-boot. During the PCI subsystem error recovery, after the + * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform + * proper error recovery and then call this routine before calling the .resume + * method to recover the device. This function will initialize the HBA device, + * enable the interrupt, but it will just put the HBA to offline state without + * passing any I/O traffic. 
* * Return codes - * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery - * PCI_ERS_RESULT_DISCONNECT - device could not be recovered - **/ -static pci_ers_result_t -lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) + * PCI_ERS_RESULT_RECOVERED - the device has been recovered + * PCI_ERS_RESULT_DISCONNECT - device could not be recovered + */ +static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + struct lpfc_sli *psli = &phba->sli; + uint32_t intr_mode; - switch (phba->pci_dev_grp) { - case LPFC_PCI_DEV_LP: - rc = lpfc_io_error_detected_s3(pdev, state); - break; - case LPFC_PCI_DEV_OC: - rc = lpfc_io_error_detected_s4(pdev, state); - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1427 Invalid PCI device group: 0x%x\n", - phba->pci_dev_grp); - break; + dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n"); + if (pci_enable_device_mem(pdev)) { + printk(KERN_ERR "lpfc: Cannot re-enable " + "PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; } - return rc; -} -/** - * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch - * @pdev: pointer to PCI device. - * - * This routine is registered to the PCI subsystem for error handling. This - * function is called after PCI bus has been reset to restart the PCI card - * from scratch, as if from a cold-boot. When this routine is invoked, it - * dispatches the action to the proper SLI-3 or SLI-4 device reset handling - * routine, which will perform the proper device reset. - * - * Return codes - * PCI_ERS_RESULT_RECOVERED - the device has been recovered - * PCI_ERS_RESULT_DISCONNECT - device could not be recovered - **/ -static pci_ers_result_t -lpfc_io_slot_reset(struct pci_dev *pdev) -{ - struct Scsi_Host *shost = pci_get_drvdata(pdev); - struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT; + pci_restore_state(pdev); + if (pdev->is_busmaster) + pci_set_master(pdev); - switch (phba->pci_dev_grp) { - case LPFC_PCI_DEV_LP: - rc = lpfc_io_slot_reset_s3(pdev); - break; - case LPFC_PCI_DEV_OC: - rc = lpfc_io_slot_reset_s4(pdev); - break; - default: + spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI2_ACTIVE; + spin_unlock_irq(&phba->hbalock); + + /* Configure and enable interrupt */ + intr_mode = lpfc_enable_intr(phba, phba->intr_mode); + if (intr_mode == LPFC_INTR_ERROR) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1428 Invalid PCI device group: 0x%x\n", - phba->pci_dev_grp); - break; - } - return rc; + "0427 Cannot re-enable interrupt after " + "slot reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } else + phba->intr_mode = intr_mode; + + /* Take device offline; this will perform cleanup */ + lpfc_offline(phba); + lpfc_sli_brdrestart(phba); + + /* Log the current active interrupt mode */ + lpfc_log_intr_mode(phba, phba->intr_mode); + + return PCI_ERS_RESULT_RECOVERED; } /** - * lpfc_io_resume - lpfc method for resuming PCI I/O operation + * lpfc_io_resume - Resume PCI I/O operation * @pdev: pointer to PCI device * - * This routine is registered to the PCI subsystem for error handling. It - * is called when kernel error recovery tells the lpfc driver that it is - * OK to resume normal PCI operation after PCI bus error recovery. 
When - * this routine is invoked, it dispatches the action to the proper SLI-3 - * or SLI-4 device io_resume routine, which will resume the device operation. - **/ -static void -lpfc_io_resume(struct pci_dev *pdev) + * This routine is registered to the PCI subsystem for error handling. It is + * called when kernel error recovery tells the lpfc driver that it is ok to + * resume normal PCI operation after PCI bus error recovery. After this call, + * traffic can start to flow from this device again. + */ +static void lpfc_io_resume(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; - switch (phba->pci_dev_grp) { - case LPFC_PCI_DEV_LP: - lpfc_io_resume_s3(pdev); - break; - case LPFC_PCI_DEV_OC: - lpfc_io_resume_s4(pdev); - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1429 Invalid PCI device group: 0x%x\n", - phba->pci_dev_grp); - break; - } - return; + lpfc_online(phba); } static struct pci_device_id lpfc_id_table[] = { @@ -7939,10 +3469,6 @@ static struct pci_device_id lpfc_id_table[] = { PCI_ANY_ID, PCI_ANY_ID, }, {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S, PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK, - PCI_ANY_ID, PCI_ANY_ID, }, - {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S, - PCI_ANY_ID, PCI_ANY_ID, }, { 0 } }; @@ -7960,7 +3486,7 @@ static struct pci_driver lpfc_driver = { .probe = lpfc_pci_probe_one, .remove = __devexit_p(lpfc_pci_remove_one), .suspend = lpfc_pci_suspend_one, - .resume = lpfc_pci_resume_one, + .resume = lpfc_pci_resume_one, .err_handler = &lpfc_err_handler, }; diff --git a/trunk/drivers/scsi/lpfc/lpfc_logmsg.h b/trunk/drivers/scsi/lpfc/lpfc_logmsg.h index 954ba57970a3..1aa85709b012 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/trunk/drivers/scsi/lpfc/lpfc_logmsg.h @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * * @@ -18,39 +18,33 @@ * included with this package. 
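The id table and struct pci_driver at the end of the file are what bind every callback in this patch to the PCI core. A skeletal equivalent for orientation; 0x10df is Emulex's vendor ID but the device ID and all names here are made up, and the suspend/resume and err_handler members would be wired exactly as the hunks above show:

#include <linux/module.h>
#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);	/* real probe work goes here */
}

static void demo_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_device_id demo_id_table[] = {
	{ 0x10df, 0x1234, PCI_ANY_ID, PCI_ANY_ID, },	/* device ID is illustrative */
	{ 0 }	/* zero entry terminates the table */
};
MODULE_DEVICE_TABLE(pci, demo_id_table);

static struct pci_driver demo_driver = {
	.name		= "demo",
	.id_table	= demo_id_table,
	.probe		= demo_probe,
	.remove		= demo_remove,
};

static int __init demo_init(void)
{
	return pci_register_driver(&demo_driver);
}

static void __exit demo_exit(void)
{
	pci_unregister_driver(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");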
* *******************************************************************/ -#define LOG_ELS 0x00000001 /* ELS events */ -#define LOG_DISCOVERY 0x00000002 /* Link discovery events */ -#define LOG_MBOX 0x00000004 /* Mailbox events */ -#define LOG_INIT 0x00000008 /* Initialization events */ -#define LOG_LINK_EVENT 0x00000010 /* Link events */ -#define LOG_IP 0x00000020 /* IP traffic history */ -#define LOG_FCP 0x00000040 /* FCP traffic history */ -#define LOG_NODE 0x00000080 /* Node table events */ -#define LOG_TEMP 0x00000100 /* Temperature sensor events */ -#define LOG_BG 0x00000200 /* BlockGuard events */ -#define LOG_MISC 0x00000400 /* Miscellaneous events */ -#define LOG_SLI 0x00000800 /* SLI events */ -#define LOG_FCP_ERROR 0x00001000 /* log errors, not underruns */ -#define LOG_LIBDFC 0x00002000 /* Libdfc events */ -#define LOG_VPORT 0x00004000 /* NPIV events */ -#define LOF_SECURITY 0x00008000 /* Security events */ -#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ -#define LOG_ALL_MSG 0xffffffff /* LOG all messages */ +#define LOG_ELS 0x1 /* ELS events */ +#define LOG_DISCOVERY 0x2 /* Link discovery events */ +#define LOG_MBOX 0x4 /* Mailbox events */ +#define LOG_INIT 0x8 /* Initialization events */ +#define LOG_LINK_EVENT 0x10 /* Link events */ +#define LOG_IP 0x20 /* IP traffic history */ +#define LOG_FCP 0x40 /* FCP traffic history */ +#define LOG_NODE 0x80 /* Node table events */ +#define LOG_TEMP 0x100 /* Temperature sensor events */ +#define LOG_BG 0x200 /* BlockGuard events */ +#define LOG_MISC 0x400 /* Miscellaneous events */ +#define LOG_SLI 0x800 /* SLI events */ +#define LOG_FCP_ERROR 0x1000 /* log errors, not underruns */ +#define LOG_LIBDFC 0x2000 /* Libdfc events */ +#define LOG_VPORT 0x4000 /* NPIV events */ +#define LOG_ALL_MSG 0xffff /* LOG all messages */ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ -do { \ - { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \ + do { \ + { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \ dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \ -} while (0) + } while (0) #define lpfc_printf_log(phba, level, mask, fmt, arg...) \ -do { \ - { uint32_t log_verbose = (phba)->pport ? \ - (phba)->pport->cfg_log_verbose : \ - (phba)->cfg_log_verbose; \ - if (((mask) & log_verbose) || (level[1] <= '3')) \ + do { \ + { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \ dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ - fmt, phba->brd_no, ##arg); \ - } \ -} while (0) + fmt, phba->brd_no, ##arg); } \ + } while (0) diff --git a/trunk/drivers/scsi/lpfc/lpfc_mbox.c b/trunk/drivers/scsi/lpfc/lpfc_mbox.c index b9b451c09010..134fc7fc2127 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_mbox.c +++ b/trunk/drivers/scsi/lpfc/lpfc_mbox.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
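Both versions of lpfc_printf_log() gate messages on a verbosity bitmask but let anything at severity 3 (KERN_ERR) or worse through unconditionally: level[1] is the digit inside the "<n>" printk prefix, so the level[1] <= '3' test matches KERN_ERR, KERN_CRIT, KERN_ALERT, and KERN_EMERG. Usage looks like this, assuming a phba pointer in scope; the message numbers are illustrative, lpfc assigns them uniquely:

/* Printed only when LOG_INIT is set in the verbosity mask: '6' > '3' */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
		"9001 example informational message\n");

/* KERN_ERR is "<3>", so this bypasses the verbosity mask entirely */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
		"9002 example error message\n");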
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -28,10 +28,8 @@ #include -#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" -#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -40,44 +38,6 @@ #include "lpfc_crtn.h" #include "lpfc_compat.h" -/** - * lpfc_dump_static_vport - Dump HBA's static vport information. - * @phba: pointer to lpfc hba data structure. - * @pmb: pointer to the driver internal queue element for mailbox command. - * @offset: offset for dumping vport info. - * - * The dump mailbox command provides a method for the device driver to obtain - * various types of information from the HBA device. - * - * This routine prepares the mailbox command for dumping list of static - * vports to be created. - **/ -void -lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, - uint16_t offset) -{ - MAILBOX_t *mb; - void *ctx; - - mb = &pmb->u.mb; - ctx = pmb->context2; - - /* Setup to dump vport info region */ - memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); - mb->mbxCommand = MBX_DUMP_MEMORY; - mb->un.varDmp.cv = 1; - mb->un.varDmp.type = DMP_NV_PARAMS; - mb->un.varDmp.entry_index = offset; - mb->un.varDmp.region_id = DMP_REGION_VPORT; - mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t); - mb->un.varDmp.co = 0; - mb->un.varDmp.resp_offset = 0; - pmb->context2 = ctx; - mb->mbxOwner = OWN_HOST; - - return; -} - /** * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory * @phba: pointer to lpfc hba data structure. @@ -98,7 +58,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset) MAILBOX_t *mb; void *ctx; - mb = &pmb->u.mb; + mb = &pmb->mb; ctx = pmb->context2; /* Setup to dump VPD region */ @@ -130,7 +90,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) MAILBOX_t *mb; void *ctx; - mb = &pmb->u.mb; + mb = &pmb->mb; /* Save context so that we can restore after memset */ ctx = pmb->context2; @@ -165,7 +125,7 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; - mb = &pmb->u.mb; + mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_READ_NV; mb->mbxOwner = OWN_HOST; @@ -191,7 +151,7 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, { MAILBOX_t *mb; - mb = &pmb->u.mb; + mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_ASYNCEVT_ENABLE; mb->un.varCfgAsyncEvent.ring = ring; @@ -217,7 +177,7 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; - mb = &pmb->u.mb; + mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_HEARTBEAT; mb->mbxOwner = OWN_HOST; @@ -251,7 +211,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp) struct lpfc_sli *psli; psli = &phba->sli; - mb = &pmb->u.mb; + mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); INIT_LIST_HEAD(&mp->list); @@ -288,7 +248,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; - mb = &pmb->u.mb; + mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varClearLA.eventTag = phba->fc_eventTag; @@ -315,7 +275,7 @@ void lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { struct lpfc_vport *vport = phba->pport; - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); /* NEW_FEATURE @@ -361,7 +321,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) int lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = 
&pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; uint32_t attentionConditions[2]; /* Sanity check */ @@ -445,7 +405,7 @@ lpfc_init_link(struct lpfc_hba * phba, struct lpfc_sli *psli; MAILBOX_t *mb; - mb = &pmb->u.mb; + mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); psli = &phba->sli; @@ -532,7 +492,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) struct lpfc_sli *psli; psli = &phba->sli; - mb = &pmb->u.mb; + mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxOwner = OWN_HOST; @@ -555,7 +515,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); - mb->un.varRdSparm.vpi = vpi + phba->vpi_base; + mb->un.varRdSparm.vpi = vpi; /* save address for completion */ pmb->context1 = mp; @@ -584,12 +544,10 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did, { MAILBOX_t *mb; - mb = &pmb->u.mb; + mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varUnregDID.did = did; - if (vpi != 0xffff) - vpi += phba->vpi_base; mb->un.varUnregDID.vpi = vpi; mb->mbxCommand = MBX_UNREG_D_ID; @@ -615,7 +573,7 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; - mb = &pmb->u.mb; + mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_READ_CONFIG; @@ -640,7 +598,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { MAILBOX_t *mb; - mb = &pmb->u.mb; + mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->mbxCommand = MBX_READ_LNK_STAT; @@ -649,7 +607,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) } /** - * lpfc_reg_rpi - Prepare a mailbox command for registering remote login + * lpfc_reg_login - Prepare a mailbox command for registering remote login * @phba: pointer to lpfc hba data structure. * @vpi: virtual N_Port identifier. * @did: remote port identifier. @@ -673,23 +631,17 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) * 1 - DMA memory allocation failed **/ int -lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, +lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag) { - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; uint8_t *sparam; struct lpfc_dmabuf *mp; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varRegLogin.rpi = 0; - if (phba->sli_rev == LPFC_SLI_REV4) { - mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba); - if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR) - return 1; - } - - mb->un.varRegLogin.vpi = vpi + phba->vpi_base; + mb->un.varRegLogin.vpi = vpi; mb->un.varRegLogin.did = did; mb->un.varWords[30] = flag; /* Set flag to issue action on cmpl */ @@ -745,16 +697,15 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, { MAILBOX_t *mb; - mb = &pmb->u.mb; + mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varUnregLogin.rpi = (uint16_t) rpi; mb->un.varUnregLogin.rsvd1 = 0; - mb->un.varUnregLogin.vpi = vpi + phba->vpi_base; + mb->un.varUnregLogin.vpi = vpi; mb->mbxCommand = MBX_UNREG_LOGIN; mb->mbxOwner = OWN_HOST; - return; } @@ -774,15 +725,15 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, * This routine prepares the mailbox command for registering a virtual N_Port. 
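Nearly every mailbox builder in this file follows the same shape: wipe the queue element, fill in the command-specific fields, and mark the mailbox host-owned; builders that carry a completion context must save and restore it across the memset, since the wipe would otherwise destroy it. A compact user-space sketch of the pattern, with illustrative types and opcode value (not the real MAILBOX_t layout):

#include <string.h>

struct demo_mboxq {
	int	command;	/* stand-in for mb->mbxCommand */
	int	owner;		/* stand-in for mb->mbxOwner   */
	void	*context2;	/* caller-owned completion context */
};

enum { DEMO_OWN_HOST = 1, DEMO_MBX_HEARTBEAT = 0x31 };	/* illustrative values */

static void demo_heart_beat(struct demo_mboxq *pmb)
{
	void *ctx = pmb->context2;	/* survive the wipe below */

	memset(pmb, 0, sizeof(*pmb));	/* clear stale command state */
	pmb->command = DEMO_MBX_HEARTBEAT;
	pmb->owner = DEMO_OWN_HOST;
	pmb->context2 = ctx;		/* restore caller's context */
}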
**/ void -lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb) +lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid, + LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); - mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base; - mb->un.varRegVpi.sid = vport->fc_myDID; - mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; + mb->un.varRegVpi.vpi = vpi; + mb->un.varRegVpi.sid = sid; mb->mbxCommand = MBX_REG_VPI; mb->mbxOwner = OWN_HOST; @@ -809,10 +760,10 @@ lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb) void lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) { - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); - mb->un.varUnregVpi.vpi = vpi + phba->vpi_base; + mb->un.varUnregVpi.vpi = vpi; mb->mbxCommand = MBX_UNREG_VPI; mb->mbxOwner = OWN_HOST; @@ -901,7 +852,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba) void lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); mb->un.varRdRev.cv = 1; mb->un.varRdRev.v3req = 1; /* Request SLI3 info */ @@ -994,7 +945,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id, uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb) { int i; - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; struct config_hbq_var *hbqmb = &mb->un.varCfgHbq; memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); @@ -1069,7 +1020,7 @@ void lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) { int i; - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; struct lpfc_sli *psli; struct lpfc_sli_ring *pring; @@ -1124,7 +1075,7 @@ void lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr; - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; dma_addr_t pdma_addr; uint32_t bar_low, bar_high; size_t offset; @@ -1148,22 +1099,21 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* If HBA supports SLI=3 ask for it */ - if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) { + if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) { if (phba->cfg_enable_bg) mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ - mb->un.varCfgPort.cdss = 1; /* Configure Security */ mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */ mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); if (phba->max_vpi && phba->cfg_enable_npiv && phba->vpd.sli3Feat.cmv) { - mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI; + mb->un.varCfgPort.max_vpi = phba->max_vpi; mb->un.varCfgPort.cmv = 1; } else mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; } else - phba->sli_rev = LPFC_SLI_REV2; + phba->sli_rev = 2; mb->un.varCfgPort.sli_mode = phba->sli_rev; /* Now setup pcb */ @@ -1295,7 +1245,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) void lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) { - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); mb->mbxCommand = MBX_KILL_BOARD; @@ -1354,98 +1304,29 @@ lpfc_mbox_get(struct lpfc_hba * phba) return mbq; } -/** - * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list - * @phba: pointer to lpfc hba data structure. - * @mbq: pointer to the driver internal queue element for mailbox command. 
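A recurring detail in the removed SLI-4 lines above is the vpi + phba->vpi_base bias: the driver numbers VPIs from zero per port, while the firmware expects absolute indices offset by a per-function base. Note also the 0xffff sentinel seen in lpfc_unreg_did, which means "all VPIs" and must not be biased. A minimal sketch of that translation, under those assumptions:

#include <stdint.h>

#define DEMO_VPI_ALL 0xffff	/* sentinel: operate on all VPIs */

/*
 * Map a driver-relative VPI to the absolute value the firmware expects.
 * The "all VPIs" sentinel is passed through unmodified, mirroring the
 * removed check in lpfc_unreg_did().
 */
static uint16_t demo_abs_vpi(uint16_t vpi, uint16_t vpi_base)
{
	if (vpi == DEMO_VPI_ALL)
		return vpi;
	return (uint16_t)(vpi + vpi_base);
}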
- * - * This routine puts the completed mailbox command into the mailbox command - * complete list. This is the unlocked version of the routine. The mailbox - * complete list is used by the driver worker thread to process mailbox - * complete callback functions outside the driver interrupt handler. - **/ -void -__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq) -{ - list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); -} - /** * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list * @phba: pointer to lpfc hba data structure. * @mbq: pointer to the driver internal queue element for mailbox command. * * This routine puts the completed mailbox command into the mailbox command - * complete list. This is the locked version of the routine. The mailbox - * complete list is used by the driver worker thread to process mailbox - * complete callback functions outside the driver interrupt handler. + * complete list. This routine is called from driver interrupt handler + * context. The mailbox complete list is used by the driver worker thread + * to process mailbox complete callback functions outside the driver interrupt + * handler. **/ void -lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq) +lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) { unsigned long iflag; /* This function expects to be called from interrupt context */ spin_lock_irqsave(&phba->hbalock, iflag); - __lpfc_mbox_cmpl_put(phba, mbq); + list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl); spin_unlock_irqrestore(&phba->hbalock, iflag); return; } -/** - * lpfc_mbox_cmd_check - Check the validity of a mailbox command - * @phba: pointer to lpfc hba data structure. - * @mboxq: pointer to the driver internal queue element for mailbox command. - * - * This routine checks whether a mailbox command is valid to be issued. - * The check is performed by the mailbox issue API before a client issues - * a mailbox command to the mailbox transport. - * - * Return 0 - pass the check, -ENODEV - fail the check - **/ -int -lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) -{ - /* Mailbox commands that have a completion handler must also have a - * vport specified. - */ - if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl && - mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) { - if (!mboxq->vport) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT, - "1814 Mbox x%x failed, no vport\n", - mboxq->u.mb.mbxCommand); - dump_stack(); - return -ENODEV; - } - } - return 0; -} - -/** - * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command - * @phba: pointer to lpfc hba data structure. - * - * This routine checks whether the HBA device is ready for posting a - * mailbox command. It is used by the mailbox transport API at the time it is - * to post a mailbox command to the device. - * - * Return 0 - pass the check, -ENODEV - fail the check - **/ -int -lpfc_mbox_dev_check(struct lpfc_hba *phba) -{ - /* If the PCI channel is in offline state, do not issue mbox */ - if (unlikely(pci_channel_offline(phba->pcidev))) - return -ENODEV; - - /* If the HBA is in error state, do not issue mbox */ - if (phba->link_state == LPFC_HBA_ERROR) - return -ENODEV; - - return 0; -} - /** * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value * @phba: pointer to lpfc hba data structure.
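The deleted __lpfc_mbox_cmpl_put/lpfc_mbox_cmpl_put pair above is the standard kernel locked/unlocked helper split: the double-underscore variant assumes the caller already holds the lock, so code paths that are already inside a locked region can reuse the body without deadlocking, while everyone else calls the locking wrapper. A user-space analogue, with a pthread mutex standing in for spin_lock_irqsave on phba->hbalock:

#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_cmpl_count;	/* stands in for the mboxq_cmpl list */

/*
 * Unlocked variant: caller must already hold demo_lock. The
 * double-underscore prefix follows the kernel convention used by
 * __lpfc_mbox_cmpl_put().
 */
static void __demo_cmpl_put(void)
{
	demo_cmpl_count++;
}

/* Locked variant: safe to call from any context. */
static void demo_cmpl_put(void)
{
	pthread_mutex_lock(&demo_lock);
	__demo_cmpl_put();
	pthread_mutex_unlock(&demo_lock);
}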
@@ -1469,475 +1350,6 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) case MBX_WRITE_WWN: /* 0x98 */ case MBX_LOAD_EXP_ROM: /* 0x9C */ return LPFC_MBOX_TMO_FLASH_CMD; - case MBX_SLI4_CONFIG: /* 0x9b */ - return LPFC_MBOX_SLI4_CONFIG_TMO; } return LPFC_MBOX_TMO; } - -/** - * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command - * @mbox: pointer to lpfc mbox command. - * @sgentry: sge entry index. - * @phyaddr: physical address for the sge - * @length: Length of the sge. - * - * This routine sets up an entry in the non-embedded mailbox command at the sge - * index location. - **/ -void -lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry, - dma_addr_t phyaddr, uint32_t length) -{ - struct lpfc_mbx_nembed_cmd *nembed_sge; - - nembed_sge = (struct lpfc_mbx_nembed_cmd *) - &mbox->u.mqe.un.nembed_cmd; - nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr); - nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr); - nembed_sge->sge[sgentry].length = length; -} - -/** - * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command - * @mbox: pointer to lpfc mbox command. - * @sgentry: sge entry index. - * - * This routine gets an entry from the non-embedded mailbox command at the sge - * index location. - **/ -void -lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry, - struct lpfc_mbx_sge *sge) -{ - struct lpfc_mbx_nembed_cmd *nembed_sge; - - nembed_sge = (struct lpfc_mbx_nembed_cmd *) - &mbox->u.mqe.un.nembed_cmd; - sge->pa_lo = nembed_sge->sge[sgentry].pa_lo; - sge->pa_hi = nembed_sge->sge[sgentry].pa_hi; - sge->length = nembed_sge->sge[sgentry].length; -} - -/** - * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command - * @phba: pointer to lpfc hba data structure. - * @mbox: pointer to lpfc mbox command. - * - * This routine frees SLI4 specific mailbox command for sending IOCTL command. - **/ -void -lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox) -{ - struct lpfc_mbx_sli4_config *sli4_cfg; - struct lpfc_mbx_sge sge; - dma_addr_t phyaddr; - uint32_t sgecount, sgentry; - - sli4_cfg = &mbox->u.mqe.un.sli4_config; - - /* For embedded mbox command, just free the mbox command */ - if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) { - mempool_free(mbox, phba->mbox_mem_pool); - return; - } - - /* For non-embedded mbox command, we need to free the pages first */ - sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr); - /* There is nothing we can do if there is no sge address array */ - if (unlikely(!mbox->sge_array)) { - mempool_free(mbox, phba->mbox_mem_pool); - return; - } - /* Each non-embedded DMA memory was allocated in the length of a page */ - for (sgentry = 0; sgentry < sgecount; sgentry++) { - lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge); - phyaddr = getPaddr(sge.pa_hi, sge.pa_lo); - dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE, - mbox->sge_array->addr[sgentry], phyaddr); - } - /* Free the sge address array memory */ - kfree(mbox->sge_array); - /* Finally, free the mailbox command itself */ - mempool_free(mbox, phba->mbox_mem_pool); -} - -/** - * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command - * @phba: pointer to lpfc hba data structure. - * @mbox: pointer to lpfc mbox command. - * @subsystem: The sli4 config sub mailbox subsystem. - * @opcode: The sli4 config sub mailbox command opcode. - * @length: Length of the sli4 config mailbox command. - * - * This routine sets up the header fields of SLI4 specific mailbox command - * for sending IOCTL command. 
- * - * Return: the actual length of the mbox command allocated (mostly useful - * for non-embedded mailbox commands). - **/ -int -lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox, - uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb) -{ - struct lpfc_mbx_sli4_config *sli4_config; - union lpfc_sli4_cfg_shdr *cfg_shdr = NULL; - uint32_t alloc_len; - uint32_t resid_len; - uint32_t pagen, pcount; - void *viraddr; - dma_addr_t phyaddr; - - /* Set up SLI4 mailbox command header fields */ - memset(mbox, 0, sizeof(*mbox)); - bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG); - - /* Set up SLI4 ioctl command header fields */ - sli4_config = &mbox->u.mqe.un.sli4_config; - - /* Setup for the embedded mbox command */ - if (emb) { - /* Set up main header fields */ - bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1); - sli4_config->header.cfg_mhdr.payload_length = - LPFC_MBX_CMD_HDR_LENGTH + length; - /* Set up sub-header fields following main header */ - bf_set(lpfc_mbox_hdr_opcode, - &sli4_config->header.cfg_shdr.request, opcode); - bf_set(lpfc_mbox_hdr_subsystem, - &sli4_config->header.cfg_shdr.request, subsystem); - sli4_config->header.cfg_shdr.request.request_length = length; - return length; - } - - /* Setup for the non-embedded mbox command */ - pcount = (PAGE_ALIGN(length))/PAGE_SIZE; - pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ? - LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount; - /* Allocate record for keeping SGE virtual addresses */ - mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt), - GFP_KERNEL); - if (!mbox->sge_array) - return 0; - - for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) { - /* The DMA memory is always allocated in the length of a - * page even though the last SGE might not fill up to a - * page, this is used as the a priori size of PAGE_SIZE for - * the later DMA memory free. - */ - viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE, - &phyaddr, GFP_KERNEL); - /* If the allocation fails, proceed with whatever we have */ - if (!viraddr) - break; - mbox->sge_array->addr[pagen] = viraddr; - /* Keep the first page for later sub-header construction */ - if (pagen == 0) - cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr; - resid_len = length - alloc_len; - if (resid_len > PAGE_SIZE) { - lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr, - PAGE_SIZE); - alloc_len += PAGE_SIZE; - } else { - lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr, - resid_len); - alloc_len = length; - } - } - - /* Set up main header fields in mailbox command */ - sli4_config->header.cfg_mhdr.payload_length = alloc_len; - bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen); - - /* Set up sub-header fields into the first page */ - if (pagen > 0) { - bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode); - bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem); - cfg_shdr->request.request_length = - alloc_len - sizeof(union lpfc_sli4_cfg_shdr); - } - /* The sub-header is in DMA memory, which needs endian conversion */ - lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr, - sizeof(union lpfc_sli4_cfg_shdr)); - - return alloc_len; -} - -/** - * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command - * @phba: pointer to lpfc hba data structure. - * @mbox: pointer to lpfc mbox command. - * - * This routine gets the opcode from a SLI4 specific mailbox command for - * sending IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG - * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be - * returned.
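The non-embedded branch of lpfc_sli4_config above chops the payload into page-sized DMA chunks and records each one in an SGE; only the final SGE carries the residual length. The arithmetic is easy to check in isolation; DEMO_SGE_MAX_PAGES below is a hypothetical cap mirroring LPFC_SLI4_MBX_SGE_MAX_PAGES:

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE     4096u
#define DEMO_SGE_MAX_PAGES 7u	/* hypothetical cap on SGE entries */

int main(void)
{
	uint32_t length = 9000u;	/* requested config payload */
	uint32_t pcount = (length + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE;
	uint32_t alloc_len = 0, pagen;

	if (pcount > DEMO_SGE_MAX_PAGES)
		pcount = DEMO_SGE_MAX_PAGES;
	for (pagen = 0; pagen < pcount; pagen++) {
		uint32_t resid_len = length - alloc_len;
		/* full page for every chunk but the last */
		uint32_t sge_len = resid_len > DEMO_PAGE_SIZE ?
				   DEMO_PAGE_SIZE : resid_len;

		alloc_len += sge_len;
		printf("sge %u: %u bytes\n", pagen, sge_len);
	}
	printf("payload_length = %u\n", alloc_len);	/* 9000 here */
	return 0;
}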
- **/ -uint8_t -lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox) -{ - struct lpfc_mbx_sli4_config *sli4_cfg; - union lpfc_sli4_cfg_shdr *cfg_shdr; - - if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG) - return 0; - sli4_cfg = &mbox->u.mqe.un.sli4_config; - - /* For embedded mbox command, get opcode from embedded sub-header*/ - if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) { - cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; - return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request); - } - - /* For non-embedded mbox command, get opcode from first dma page */ - if (unlikely(!mbox->sge_array)) - return 0; - cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0]; - return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request); -} - -/** - * lpfc_request_features: Configure SLI4 REQUEST_FEATURES mailbox - * @mboxq: pointer to lpfc mbox command. - * - * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES - * mailbox command. - **/ -void -lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq) -{ - /* Set up SLI4 mailbox command header fields */ - memset(mboxq, 0, sizeof(LPFC_MBOXQ_t)); - bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS); - - /* Set up host requested features. */ - bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1); - - /* Virtual fabrics and FIPs are not supported yet. */ - bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0); - - /* Enable DIF (block guard) only if configured to do so. */ - if (phba->cfg_enable_bg) - bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1); - - /* Enable NPIV only if configured to do so. */ - if (phba->max_vpi && phba->cfg_enable_npiv) - bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1); - - return; -} - -/** - * lpfc_init_vfi - Initialize the INIT_VFI mailbox command - * @mbox: pointer to lpfc mbox command to initialize. - * @vport: Vport associated with the VF. - * - * This routine initializes @mbox to all zeros and then fills in the mailbox - * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI - * in the context of an FCF. The driver issues this command to setup a VFI - * before issuing a FLOGI to login to the VSAN. The driver should also issue a - * REG_VFI after a successful VSAN login. - **/ -void -lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport) -{ - struct lpfc_mbx_init_vfi *init_vfi; - - memset(mbox, 0, sizeof(*mbox)); - init_vfi = &mbox->u.mqe.un.init_vfi; - bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI); - bf_set(lpfc_init_vfi_vr, init_vfi, 1); - bf_set(lpfc_init_vfi_vt, init_vfi, 1); - bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base); - bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi); -} - -/** - * lpfc_reg_vfi - Initialize the REG_VFI mailbox command - * @mbox: pointer to lpfc mbox command to initialize. - * @vport: vport associated with the VF. - * @phys: BDE DMA bus address used to send the service parameters to the HBA. - * - * This routine initializes @mbox to all zeros and then fills in the mailbox - * fields from @vport, and uses @buf as a DMAable buffer to send the vport's - * fc service parameters to the HBA for this VFI. REG_VFI configures virtual - * fabrics identified by VFI in the context of an FCF. 
- **/ -void -lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) -{ - struct lpfc_mbx_reg_vfi *reg_vfi; - - memset(mbox, 0, sizeof(*mbox)); - reg_vfi = &mbox->u.mqe.un.reg_vfi; - bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI); - bf_set(lpfc_reg_vfi_vp, reg_vfi, 1); - bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base); - bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi); - bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base); - reg_vfi->bde.addrHigh = putPaddrHigh(phys); - reg_vfi->bde.addrLow = putPaddrLow(phys); - reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam); - reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID); -} - -/** - * lpfc_init_vpi - Initialize the INIT_VPI mailbox command - * @mbox: pointer to lpfc mbox command to initialize. - * @vpi: VPI to be initialized. - * - * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the - * command to activate a virtual N_Port. The HBA assigns a MAC address to use - * with the virtual N Port. The SLI Host issues this command before issuing a - * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a - * successful virtual NPort login. - **/ -void -lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi) -{ - memset(mbox, 0, sizeof(*mbox)); - bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI); - bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi); -} - -/** - * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command - * @mbox: pointer to lpfc mbox command to initialize. - * @vfi: VFI to be unregistered. - * - * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric - * (logical NPort) into the inactive state. The SLI Host must have logged out - * and unregistered all remote N_Ports to abort any activity on the virtual - * fabric. The SLI Port posts the mailbox response after marking the virtual - * fabric inactive. - **/ -void -lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi) -{ - memset(mbox, 0, sizeof(*mbox)); - bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI); - bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi); -} - -/** - * lpfc_dump_fcoe_param - Dump config region 23 to get FCoE parameters. - * @phba: pointer to the hba structure. - * @mbox: pointer to lpfc mbox command to initialize. - * - * This function creates a SLI4 dump mailbox command to dump FCoE - * parameters stored in region 23.
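The bf_set() calls that dominate these SLI-4 builders are generated field accessors: each field name expands to a SHIFT/MASK pair, and the macro performs a read-modify-write on the target word. A simplified stand-alone re-implementation is below; the field layout is invented for illustration, and the real lpfc macros also encode a word offset within the structure:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical field: bits 15:0 of a command word hold a VFI index. */
#define demo_vfi_SHIFT 0
#define demo_vfi_MASK  0x0000ffff

/* Clear the field, then OR in the new value, leaving other bits alone. */
#define demo_bf_set(name, wordp, val) \
	(*(wordp) = (*(wordp) & ~((uint32_t)name##_MASK << name##_SHIFT)) | \
		    (((uint32_t)(val) & name##_MASK) << name##_SHIFT))

#define demo_bf_get(name, wordp) \
	((*(wordp) >> name##_SHIFT) & name##_MASK)

int main(void)
{
	uint32_t word = 0xdead0000;	/* upper bits must survive the set */

	demo_bf_set(demo_vfi, &word, 42);
	printf("word=0x%08x vfi=%u\n", (unsigned)word,
	       (unsigned)demo_bf_get(demo_vfi, &word));	/* 0xdead002a, 42 */
	return 0;
}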
- **/ -int -lpfc_dump_fcoe_param(struct lpfc_hba *phba, - struct lpfcMboxq *mbox) -{ - struct lpfc_dmabuf *mp = NULL; - MAILBOX_t *mb; - - memset(mbox, 0, sizeof(*mbox)); - mb = &mbox->u.mb; - - mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); - if (mp) - mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); - - if (!mp || !mp->virt) { - kfree(mp); - /* dump_fcoe_param failed to allocate memory */ - lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, - "2569 lpfc_dump_fcoe_param: memory" - " allocation failed\n"); - return 1; - } - - memset(mp->virt, 0, LPFC_BPL_SIZE); - INIT_LIST_HEAD(&mp->list); - - /* save address for completion */ - mbox->context1 = (uint8_t *) mp; - - mb->mbxCommand = MBX_DUMP_MEMORY; - mb->un.varDmp.type = DMP_NV_PARAMS; - mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM; - mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE; - mb->un.varWords[3] = putPaddrLow(mp->phys); - mb->un.varWords[4] = putPaddrHigh(mp->phys); - return 0; -} - -/** - * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command - * @phba: pointer to the hba structure containing the FCF index and RQ ID. - * @mbox: pointer to lpfc mbox command to initialize. - * - * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The - * SLI Host uses the command to activate an FCF after it has acquired FCF - * information via a READ_FCF mailbox command. This mailbox command is also used - * to indicate where received unsolicited frames from this FCF will be sent. By - * default this routine will set up the FCF to forward all unsolicited frames - * to the RQ ID passed in the @phba. This can be overridden by the caller for - * more complicated setups. - **/ -void -lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox) -{ - struct lpfc_mbx_reg_fcfi *reg_fcfi; - - memset(mbox, 0, sizeof(*mbox)); - reg_fcfi = &mbox->u.mqe.un.reg_fcfi; - bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI); - bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id); - bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); - bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); - bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); - bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx); - /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */ - bf_set(lpfc_reg_fcfi_mam, reg_fcfi, - (~phba->fcf.addr_mode) & 0x3); - if (phba->fcf.fcf_flag & FCF_VALID_VLAN) { - bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); - bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id); - } -} - -/** - * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command - * @mbox: pointer to lpfc mbox command to initialize. - * @fcfi: FCFI to be unregistered. - * - * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). - * The SLI Host uses the command to inactivate an FCFI. - **/ -void -lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi) -{ - memset(mbox, 0, sizeof(*mbox)); - bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI); - bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi); -} - -/** - * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command - * @mbox: pointer to lpfc mbox command to initialize. - * @ndlp: The nodelist structure that describes the RPI to resume. - * - * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a - * link event.
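lpfc_dump_fcoe_param's allocation above shows a compact two-stage idiom: allocate the container, then the payload, and fold both failure checks into a single branch, relying on kfree(NULL) being a no-op. The same shape in portable C, with illustrative demo_* names:

#include <stdlib.h>

struct demo_dmabuf {
	void *virt;	/* payload buffer (DMA-able in the driver) */
};

/* Returns 0 on success, 1 on failure, mirroring the routine above. */
static int demo_dmabuf_alloc(struct demo_dmabuf **out, size_t len)
{
	struct demo_dmabuf *mp = malloc(sizeof(*mp));

	if (mp)
		mp->virt = malloc(len);	/* second stage only if first worked */
	if (!mp || !mp->virt) {
		free(mp);		/* free(NULL) is a no-op, like kfree() */
		return 1;
	}
	*out = mp;
	return 0;
}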
- **/ -void -lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp) -{ - struct lpfc_mbx_resume_rpi *resume_rpi; - - memset(mbox, 0, sizeof(*mbox)); - resume_rpi = &mbox->u.mqe.un.resume_rpi; - bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI); - bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi); - bf_set(lpfc_resume_rpi_vpi, resume_rpi, - ndlp->vport->vpi + ndlp->vport->phba->vpi_base); - bf_set(lpfc_resume_rpi_vfi, resume_rpi, - ndlp->vport->vfi + ndlp->vport->phba->vfi_base); -} diff --git a/trunk/drivers/scsi/lpfc/lpfc_mem.c b/trunk/drivers/scsi/lpfc/lpfc_mem.c index e198c917c13e..35a976733398 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_mem.c +++ b/trunk/drivers/scsi/lpfc/lpfc_mem.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -28,10 +28,8 @@ #include -#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" -#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -47,7 +45,7 @@ * @phba: HBA to allocate pools for * * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool, - * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools + * lpfc_mbuf_pool, lpfc_hbq_pool. Creates and allocates kmalloc-backed mempools * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. * * Notes: Not interrupt-safe. Must be called with no locks held. If any @@ -58,30 +56,19 @@ * -ENOMEM on failure (if any memory allocations fail) **/ int -lpfc_mem_alloc(struct lpfc_hba *phba, int align) +lpfc_mem_alloc(struct lpfc_hba * phba) { struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; int longs; int i; - if (phba->sli_rev == LPFC_SLI_REV4) - phba->lpfc_scsi_dma_buf_pool = - pci_pool_create("lpfc_scsi_dma_buf_pool", - phba->pcidev, - phba->cfg_sg_dma_buf_size, - phba->cfg_sg_dma_buf_size, - 0); - else - phba->lpfc_scsi_dma_buf_pool = - pci_pool_create("lpfc_scsi_dma_buf_pool", - phba->pcidev, phba->cfg_sg_dma_buf_size, - align, 0); + phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", + phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0); if (!phba->lpfc_scsi_dma_buf_pool) goto fail; phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev, - LPFC_BPL_SIZE, - align, 0); + LPFC_BPL_SIZE, 8,0); if (!phba->lpfc_mbuf_pool) goto fail_free_dma_buf_pool; @@ -110,31 +97,23 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) sizeof(struct lpfc_nodelist)); if (!phba->nlp_mem_pool) goto fail_free_mbox_pool; - phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool", - phba->pcidev, - LPFC_HDR_BUF_SIZE, align, 0); - if (!phba->lpfc_hrb_pool) + + phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev, + LPFC_BPL_SIZE, 8, 0); + if (!phba->lpfc_hbq_pool) goto fail_free_nlp_mem_pool; - phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool", - phba->pcidev, - LPFC_DATA_BUF_SIZE, align, 0); - if (!phba->lpfc_drb_pool) - goto fail_free_hbq_pool; /* vpi zero is reserved for the physical port so add 1 to max */ longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG; phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); if (!phba->vpi_bmask) - goto fail_free_dbq_pool; + goto 
fail_free_hbq_pool; return 0; - fail_free_dbq_pool: - pci_pool_destroy(phba->lpfc_drb_pool); - phba->lpfc_drb_pool = NULL; fail_free_hbq_pool: - pci_pool_destroy(phba->lpfc_hrb_pool); - phba->lpfc_hrb_pool = NULL; + lpfc_sli_hbqbuf_free_all(phba); + pci_pool_destroy(phba->lpfc_hbq_pool); fail_free_nlp_mem_pool: mempool_destroy(phba->nlp_mem_pool); phba->nlp_mem_pool = NULL; @@ -157,73 +136,27 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) } /** - * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc + * lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc * @phba: HBA to free memory for * - * Description: Free the memory allocated by lpfc_mem_alloc routine. This - * routine is a the counterpart of lpfc_mem_alloc. + * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, + * lpfc_hbq_pool. Frees kmalloc-backed mempools for LPFC_MBOXQ_t and + * lpfc_nodelist. Also frees the VPI bitmask * * Returns: None **/ void -lpfc_mem_free(struct lpfc_hba *phba) +lpfc_mem_free(struct lpfc_hba * phba) { - int i; + struct lpfc_sli *psli = &phba->sli; struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; + LPFC_MBOXQ_t *mbox, *next_mbox; + struct lpfc_dmabuf *mp; + int i; - /* Free VPI bitmask memory */ kfree(phba->vpi_bmask); - - /* Free HBQ pools */ lpfc_sli_hbqbuf_free_all(phba); - pci_pool_destroy(phba->lpfc_drb_pool); - phba->lpfc_drb_pool = NULL; - pci_pool_destroy(phba->lpfc_hrb_pool); - phba->lpfc_hrb_pool = NULL; - - /* Free NLP memory pool */ - mempool_destroy(phba->nlp_mem_pool); - phba->nlp_mem_pool = NULL; - - /* Free mbox memory pool */ - mempool_destroy(phba->mbox_mem_pool); - phba->mbox_mem_pool = NULL; - - /* Free MBUF memory pool */ - for (i = 0; i < pool->current_count; i++) - pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, - pool->elements[i].phys); - kfree(pool->elements); - - pci_pool_destroy(phba->lpfc_mbuf_pool); - phba->lpfc_mbuf_pool = NULL; - /* Free DMA buffer memory pool */ - pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); - phba->lpfc_scsi_dma_buf_pool = NULL; - - return; -} - -/** - * lpfc_mem_free_all - Frees all PCI and driver memory - * @phba: HBA to free memory for - * - * Description: Free memory from PCI and driver memory pools and also those - * used : lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool. Frees - * kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also frees - * the VPI bitmask. 
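The error path of lpfc_mem_alloc above is the classic goto-unwind ladder: each allocation gets a label that releases everything acquired before it, in reverse order, so a failure at any rung leaves nothing behind. Reduced to its skeleton, with malloc standing in for pci_pool_create/mempool_create:

#include <stdlib.h>

struct demo_pools {
	void *dma_buf_pool;
	void *mbuf_pool;
	void *hbq_pool;
};

static int demo_pools_create(struct demo_pools *p)
{
	p->dma_buf_pool = malloc(64);
	if (!p->dma_buf_pool)
		goto fail;
	p->mbuf_pool = malloc(64);
	if (!p->mbuf_pool)
		goto fail_free_dma_buf_pool;
	p->hbq_pool = malloc(64);
	if (!p->hbq_pool)
		goto fail_free_mbuf_pool;
	return 0;

 fail_free_mbuf_pool:
	free(p->mbuf_pool);
 fail_free_dma_buf_pool:
	free(p->dma_buf_pool);
 fail:
	return -1;	/* the driver returns -ENOMEM here */
}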
- * - * Returns: None - **/ -void -lpfc_mem_free_all(struct lpfc_hba *phba) -{ - struct lpfc_sli *psli = &phba->sli; - LPFC_MBOXQ_t *mbox, *next_mbox; - struct lpfc_dmabuf *mp; - - /* Free memory used in mailbox queue back to mailbox memory pool */ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) { mp = (struct lpfc_dmabuf *) (mbox->context1); if (mp) { @@ -233,7 +166,6 @@ lpfc_mem_free_all(struct lpfc_hba *phba) list_del(&mbox->list); mempool_free(mbox, phba->mbox_mem_pool); } - /* Free memory used in mailbox cmpl list back to mailbox memory pool */ list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) { mp = (struct lpfc_dmabuf *) (mbox->context1); if (mp) { @@ -243,10 +175,8 @@ lpfc_mem_free_all(struct lpfc_hba *phba) list_del(&mbox->list); mempool_free(mbox, phba->mbox_mem_pool); } - /* Free the active mailbox command back to the mailbox memory pool */ - spin_lock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - spin_unlock_irq(&phba->hbalock); if (psli->mbox_active) { mbox = psli->mbox_active; mp = (struct lpfc_dmabuf *) (mbox->context1); @@ -258,14 +188,27 @@ lpfc_mem_free_all(struct lpfc_hba *phba) psli->mbox_active = NULL; } - /* Free and destroy all the allocated memory pools */ - lpfc_mem_free(phba); + for (i = 0; i < pool->current_count; i++) + pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt, + pool->elements[i].phys); + kfree(pool->elements); + + pci_pool_destroy(phba->lpfc_hbq_pool); + mempool_destroy(phba->nlp_mem_pool); + mempool_destroy(phba->mbox_mem_pool); + + pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool); + pci_pool_destroy(phba->lpfc_mbuf_pool); + + phba->lpfc_hbq_pool = NULL; + phba->nlp_mem_pool = NULL; + phba->mbox_mem_pool = NULL; + phba->lpfc_scsi_dma_buf_pool = NULL; + phba->lpfc_mbuf_pool = NULL; /* Free the iocb lookup array */ kfree(psli->iocbq_lookup); psli->iocbq_lookup = NULL; - - return; } /** @@ -362,7 +305,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma) * lpfc_els_hbq_alloc - Allocate an HBQ buffer * @phba: HBA to allocate HBQ buffer for * - * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI + * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI * pool along a non-DMA-mapped container for it. * * Notes: Not interrupt-safe. Must be called with no locks held. @@ -380,7 +323,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) if (!hbqbp) return NULL; - hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, + hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL, &hbqbp->dbuf.phys); if (!hbqbp->dbuf.virt) { kfree(hbqbp); @@ -391,7 +334,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) } /** - * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc + * lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc * @phba: HBA buffer was allocated for * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc * @@ -405,72 +348,11 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba) void lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp) { - pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); + pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys); kfree(hbqbp); return; } -/** - * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer - * @phba: HBA to allocate a receive buffer for - * - * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI - * pool along a non-DMA-mapped container for it. - * - * Notes: Not interrupt-safe. 
Must be called with no locks held. - * - * Returns: - * pointer to HBQ on success - * NULL on failure - **/ -struct hbq_dmabuf * -lpfc_sli4_rb_alloc(struct lpfc_hba *phba) -{ - struct hbq_dmabuf *dma_buf; - - dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL); - if (!dma_buf) - return NULL; - - dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL, - &dma_buf->hbuf.phys); - if (!dma_buf->hbuf.virt) { - kfree(dma_buf); - return NULL; - } - dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL, - &dma_buf->dbuf.phys); - if (!dma_buf->dbuf.virt) { - pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt, - dma_buf->hbuf.phys); - kfree(dma_buf); - return NULL; - } - dma_buf->size = LPFC_BPL_SIZE; - return dma_buf; -} - -/** - * lpfc_sli4_rb_free - Frees a receive buffer - * @phba: HBA buffer was allocated for - * @dmab: DMA Buffer container returned by lpfc_sli4_hbq_alloc - * - * Description: Frees both the container and the DMA-mapped buffers returned by - * lpfc_sli4_rb_alloc. - * - * Notes: Can be called with or without locks held. - * - * Returns: None - **/ -void -lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab) -{ - pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys); - pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys); - kfree(dmab); - return; -} - /** * lpfc_in_buf_free - Free a DMA buffer * @phba: HBA buffer is associated with diff --git a/trunk/drivers/scsi/lpfc/lpfc_nportdisc.c b/trunk/drivers/scsi/lpfc/lpfc_nportdisc.c index 09f659f77bb3..08cdc77af41c 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/trunk/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -28,10 +28,8 @@ #include #include -#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" -#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -363,7 +361,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (!mbox) goto out; - rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID, + rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID, (uint8_t *) sp, mbox, 0); if (rc) { mempool_free(mbox, phba->mbox_mem_pool); @@ -497,19 +495,11 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); else lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); - if ((ndlp->nlp_type & NLP_FABRIC) && - vport->port_type == LPFC_NPIV_PORT) { - lpfc_linkdown_port(vport); - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); - spin_lock_irq(shost->host_lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(shost->host_lock); - ndlp->nlp_last_elscmd = ELS_CMD_FDISC; - } else if ((!(ndlp->nlp_type & NLP_FABRIC) && - ((ndlp->nlp_type & NLP_FCP_TARGET) || - !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || - (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { + if ((!(ndlp->nlp_type & NLP_FABRIC) && + ((ndlp->nlp_type & NLP_FCP_TARGET) || + !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || + (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { /* Only try to re-login if this is NOT a Fabric Node */ mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); spin_lock_irq(shost->host_lock); @@ -577,7 +567,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); - if (!(ndlp->nlp_flag & NLP_RPI_VALID)) { + if (!ndlp->nlp_rpi) { ndlp->nlp_flag &= ~NLP_NPR_ADISC; return 0; } @@ -867,7 +857,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, lpfc_unreg_rpi(vport, ndlp); - if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID, + if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID, (uint8_t *) sp, mbox, 0) == 0) { switch (ndlp->nlp_DID) { case NameServer_DID: @@ -1078,7 +1068,6 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, *rspiocb; IOCB_t *irsp; ADISC *ap; - int rc; cmdiocb = (struct lpfc_iocbq *) arg; rspiocb = cmdiocb->context_un.rsp_iocb; @@ -1104,15 +1093,6 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, return ndlp->nlp_state; } - if (phba->sli_rev == LPFC_SLI_REV4) { - rc = lpfc_sli4_resume_rpi(ndlp); - if (rc) { - /* Stay in state and retry. 
*/ - ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; - return ndlp->nlp_state; - } - } - if (ndlp->nlp_type & NLP_FCP_TARGET) { ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); @@ -1120,7 +1100,6 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } - return ndlp->nlp_state; } @@ -1211,7 +1190,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, /* cleanup any ndlp on mbox q waiting for reglogin cmpl */ if ((mb = phba->sli.mbox_active)) { - if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && + if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { lpfc_nlp_put(ndlp); mb->context2 = NULL; @@ -1221,7 +1200,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, spin_lock_irq(&phba->hbalock); list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { - if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && + if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { mp = (struct lpfc_dmabuf *) (mb->context1); if (mp) { @@ -1272,7 +1251,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; uint32_t did = mb->un.varWords[1]; if (mb->mbxStatus) { @@ -1304,7 +1283,6 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, } ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_VALID; /* Only if we are not a fabric nport do we issue PRLI */ if (!(ndlp->nlp_type & NLP_FABRIC)) { @@ -1900,12 +1878,11 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport, void *arg, uint32_t evt) { LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg; - MAILBOX_t *mb = &pmb->u.mb; + MAILBOX_t *mb = &pmb->mb; - if (!mb->mbxStatus) { + if (!mb->mbxStatus) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_VALID; - } else { + else { if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; diff --git a/trunk/drivers/scsi/lpfc/lpfc_scsi.c b/trunk/drivers/scsi/lpfc/lpfc_scsi.c index e9fa6762044a..8032c5adb6a9 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_scsi.c +++ b/trunk/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -31,10 +31,8 @@ #include #include "lpfc_version.h" -#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" -#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -59,8 +57,6 @@ static char *dif_op_str[] = { "SCSI_PROT_READ_CONVERT", "SCSI_PROT_WRITE_CONVERT" }; -static void -lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); static void lpfc_debug_save_data(struct scsi_cmnd *cmnd) @@ -329,7 +325,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); shost_for_each_device(sdev, shost) { new_queue_depth = @@ -383,7 +379,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); shost_for_each_device(sdev, shost) { if (vports[i]->cfg_lun_queue_depth <= @@ -431,7 +427,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba) vports = lpfc_create_vport_work_array(phba); if (vports != NULL) - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { shost = lpfc_shost_from_vport(vports[i]); shost_for_each_device(sdev, shost) { rport = starget_to_rport(scsi_target(sdev)); @@ -442,23 +438,22 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba) } /** - * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec + * lpfc_new_scsi_buf - Scsi buffer allocator * @vport: The virtual port for which this call being executed. - * @num_to_allocate: The requested number of buffers to allocate. * - * This routine allocates a scsi buffer for device with SLI-3 interface spec, - * the scsi buffer contains all the necessary information needed to initiate - * a SCSI I/O. The non-DMAable buffer region contains information to build - * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP, - * and the initial BPL. In addition to allocating memory, the FCP CMND and - * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB. + * This routine allocates a scsi buffer, which contains all the necessary + * information needed to initiate a SCSI I/O. The non-DMAable buffer region + * contains information to build the IOCB. The DMAable region contains + * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to + * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL + * and the BPL BDE is setup in the IOCB. * * Return codes: - * int - number of scsi buffers that were allocated. - * 0 = failure, less than num_to_alloc is a partial failure. 
+ * NULL - Error + * Pointer to lpfc_scsi_buf data structure - Success **/ -static int -lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) +static struct lpfc_scsi_buf * +lpfc_new_scsi_buf(struct lpfc_vport *vport) { struct lpfc_hba *phba = vport->phba; struct lpfc_scsi_buf *psb; @@ -468,401 +463,107 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) dma_addr_t pdma_phys_fcp_rsp; dma_addr_t pdma_phys_bpl; uint16_t iotag; - int bcnt; - for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { - psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); - if (!psb) - break; + psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); + if (!psb) + return NULL; - /* - * Get memory from the pci pool to map the virt space to pci - * bus space for an I/O. The DMA buffer includes space for the - * struct fcp_cmnd, struct fcp_rsp and the number of bde's - * necessary to support the sg_tablesize. - */ - psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, - GFP_KERNEL, &psb->dma_handle); - if (!psb->data) { - kfree(psb); - break; - } - - /* Initialize virtual ptrs to dma_buf region. */ - memset(psb->data, 0, phba->cfg_sg_dma_buf_size); - - /* Allocate iotag for psb->cur_iocbq. */ - iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); - if (iotag == 0) { - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, - psb->data, psb->dma_handle); - kfree(psb); - break; - } - psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; - - psb->fcp_cmnd = psb->data; - psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); - psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp); - - /* Initialize local short-hand pointers. */ - bpl = psb->fcp_bpl; - pdma_phys_fcp_cmd = psb->dma_handle; - pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); - pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp); - - /* - * The first two bdes are the FCP_CMD and FCP_RSP. The balance - * are sg list bdes. Initialize the first two and leave the - * rest for queuecommand. - */ - bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); - bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); - bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); - bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); - - /* Setup the physical region for the FCP RSP */ - bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); - bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); - bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); - bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; - bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); - - /* - * Since the IOCB for the FCP I/O is built into this - * lpfc_scsi_buf, initialize it with all known data now. 
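The allocator above carves one DMA allocation into three back-to-back regions, FCP CMND, FCP RSP, then the BPL, and derives every bus address from the single dma_handle. The offset arithmetic, shown with stand-in sizes (the driver uses sizeof(struct fcp_cmnd) and sizeof(struct fcp_rsp)):

#include <stdint.h>
#include <stdio.h>

#define DEMO_FCP_CMND_SZ 32u	/* stand-in for sizeof(struct fcp_cmnd) */
#define DEMO_FCP_RSP_SZ  96u	/* stand-in for sizeof(struct fcp_rsp)  */

int main(void)
{
	uint64_t dma_handle = 0x100000;	/* bus address of the one allocation */
	uint64_t cmd = dma_handle;				/* region 1 */
	uint64_t rsp = dma_handle + DEMO_FCP_CMND_SZ;		/* region 2 */
	uint64_t bpl = rsp + DEMO_FCP_RSP_SZ;			/* region 3 */

	printf("cmd=0x%llx rsp=0x%llx bpl=0x%llx\n",
	       (unsigned long long)cmd, (unsigned long long)rsp,
	       (unsigned long long)bpl);
	return 0;
}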
- */ - iocb = &psb->cur_iocbq.iocb; - iocb->un.fcpi64.bdl.ulpIoTag32 = 0; - if ((phba->sli_rev == 3) && - !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) { - /* fill in immediate fcp command BDE */ - iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED; - iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd); - iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t, - unsli3.fcp_ext.icd); - iocb->un.fcpi64.bdl.addrHigh = 0; - iocb->ulpBdeCount = 0; - iocb->ulpLe = 0; - /* fill in response BDE */ - iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = - BUFF_TYPE_BDE_64; - iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize = - sizeof(struct fcp_rsp); - iocb->unsli3.fcp_ext.rbde.addrLow = - putPaddrLow(pdma_phys_fcp_rsp); - iocb->unsli3.fcp_ext.rbde.addrHigh = - putPaddrHigh(pdma_phys_fcp_rsp); - } else { - iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64; - iocb->un.fcpi64.bdl.bdeSize = - (2 * sizeof(struct ulp_bde64)); - iocb->un.fcpi64.bdl.addrLow = - putPaddrLow(pdma_phys_bpl); - iocb->un.fcpi64.bdl.addrHigh = - putPaddrHigh(pdma_phys_bpl); - iocb->ulpBdeCount = 1; - iocb->ulpLe = 1; - } - iocb->ulpClass = CLASS3; - psb->status = IOSTAT_SUCCESS; - /* Put it back into the SCSI buffer list */ - lpfc_release_scsi_buf_s4(phba, psb); - - } - - return bcnt; -} - -/** - * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort - * @phba: pointer to lpfc hba data structure. - * @axri: pointer to the fcp xri abort wcqe structure. - * - * This routine is invoked by the worker thread to process a SLI4 fast-path - * FCP aborted xri. - **/ -void -lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, - struct sli4_wcqe_xri_aborted *axri) -{ - uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); - struct lpfc_scsi_buf *psb, *next_psb; - unsigned long iflag = 0; - - spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); - list_for_each_entry_safe(psb, next_psb, - &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) { - if (psb->cur_iocbq.sli4_xritag == xri) { - list_del(&psb->list); - psb->status = IOSTAT_SUCCESS; - spin_unlock_irqrestore( - &phba->sli4_hba.abts_scsi_buf_list_lock, - iflag); - lpfc_release_scsi_buf_s4(phba, psb); - return; - } + /* + * Get memory from the pci pool to map the virt space to pci bus space + * for an I/O. The DMA buffer includes space for the struct fcp_cmnd, + * struct fcp_rsp and the number of bde's necessary to support the + * sg_tablesize. + */ + psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL, + &psb->dma_handle); + if (!psb->data) { + kfree(psb); + return NULL; } - spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, - iflag); -} -/** - * lpfc_sli4_repost_scsi_sgl_list - Repost the SCSI buffer sgl pages as a block - * @phba: pointer to lpfc hba data structure. - * - * This routine walks the list of scsi buffers that have been allocated and - * reposts them to the HBA by using SGL block post. This is needed after a - * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine - * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list - * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers. - * - * Returns: 0 = success, non-zero failure.
- **/ -int -lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) -{ - struct lpfc_scsi_buf *psb; - int index, status, bcnt = 0, rcnt = 0, rc = 0; - LIST_HEAD(sblist); - - for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) { - psb = phba->sli4_hba.lpfc_scsi_psb_array[index]; - if (psb) { - /* Remove from SCSI buffer list */ - list_del(&psb->list); - /* Add it to a local SCSI buffer list */ - list_add_tail(&psb->list, &sblist); - if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) { - bcnt = rcnt; - rcnt = 0; - } - } else - /* A hole present in the XRI array, need to skip */ - bcnt = rcnt; - - if (index == phba->sli4_hba.scsi_xri_cnt - 1) - /* End of XRI array for SCSI buffer, complete */ - bcnt = rcnt; + /* Initialize virtual ptrs to dma_buf region. */ + memset(psb->data, 0, phba->cfg_sg_dma_buf_size); - /* Continue until collect up to a nembed page worth of sgls */ - if (bcnt == 0) - continue; - /* Now, post the SCSI buffer list sgls as a block */ - status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt); - /* Reset SCSI buffer count for next round of posting */ - bcnt = 0; - while (!list_empty(&sblist)) { - list_remove_head(&sblist, psb, struct lpfc_scsi_buf, - list); - if (status) { - /* Put this back on the abort scsi list */ - psb->status = IOSTAT_LOCAL_REJECT; - psb->result = IOERR_ABORT_REQUESTED; - rc++; - } else - psb->status = IOSTAT_SUCCESS; - /* Put it back into the SCSI buffer list */ - lpfc_release_scsi_buf_s4(phba, psb); - } + /* Allocate iotag for psb->cur_iocbq. */ + iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); + if (iotag == 0) { + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + psb->data, psb->dma_handle); + kfree (psb); + return NULL; } - return rc; -} + psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; -/** - * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec - * @vport: The virtual port for which this call being executed. - * @num_to_allocate: The requested number of buffers to allocate. - * - * This routine allocates a scsi buffer for device with SLI-4 interface spec, - * the scsi buffer contains all the necessary information needed to initiate - * a SCSI I/O. - * - * Return codes: - * int - number of scsi buffers that were allocated. - * 0 = failure, less than num_to_alloc is a partial failure. - **/ -static int -lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) -{ - struct lpfc_hba *phba = vport->phba; - struct lpfc_scsi_buf *psb; - struct sli4_sge *sgl; - IOCB_t *iocb; - dma_addr_t pdma_phys_fcp_cmd; - dma_addr_t pdma_phys_fcp_rsp; - dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; - uint16_t iotag, last_xritag = NO_XRI; - int status = 0, index; - int bcnt; - int non_sequential_xri = 0; - int rc = 0; - LIST_HEAD(sblist); - - for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { - psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); - if (!psb) - break; - - /* - * Get memory from the pci pool to map the virt space to pci bus - * space for an I/O. The DMA buffer includes space for the - * struct fcp_cmnd, struct fcp_rsp and the number of bde's - * necessary to support the sg_tablesize. - */ - psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, - GFP_KERNEL, &psb->dma_handle); - if (!psb->data) { - kfree(psb); - break; - } - - /* Initialize virtual ptrs to dma_buf region. */ - memset(psb->data, 0, phba->cfg_sg_dma_buf_size); - - /* Allocate iotag for psb->cur_iocbq. 
*/ - iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); - if (iotag == 0) { - kfree(psb); - break; - } + psb->fcp_cmnd = psb->data; + psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd); + psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp); - psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba); - if (psb->cur_iocbq.sli4_xritag == NO_XRI) { - pci_pool_free(phba->lpfc_scsi_dma_buf_pool, - psb->data, psb->dma_handle); - kfree(psb); - break; - } - if (last_xritag != NO_XRI - && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) { - non_sequential_xri = 1; - } else - list_add_tail(&psb->list, &sblist); - last_xritag = psb->cur_iocbq.sli4_xritag; - - index = phba->sli4_hba.scsi_xri_cnt++; - psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; - - psb->fcp_bpl = psb->data; - psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); - psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + - sizeof(struct fcp_cmnd)); - - /* Initialize local short-hand pointers. */ - sgl = (struct sli4_sge *)psb->fcp_bpl; - pdma_phys_bpl = psb->dma_handle; - pdma_phys_fcp_cmd = - (psb->dma_handle + phba->cfg_sg_dma_buf_size) - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); - pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); + /* Initialize local short-hand pointers. */ + bpl = psb->fcp_bpl; + pdma_phys_fcp_cmd = psb->dma_handle; + pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd); + pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp); - /* - * The first two bdes are the FCP_CMD and FCP_RSP. The balance - * are sg list bdes. Initialize the first two and leave the - * rest for queuecommand. - */ - sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); - sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); - bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd)); - bf_set(lpfc_sli4_sge_last, sgl, 0); - sgl->word2 = cpu_to_le32(sgl->word2); - sgl->word3 = cpu_to_le32(sgl->word3); - sgl++; - - /* Setup the physical region for the FCP RSP */ - sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp)); - sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp)); - bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp)); - bf_set(lpfc_sli4_sge_last, sgl, 1); - sgl->word2 = cpu_to_le32(sgl->word2); - sgl->word3 = cpu_to_le32(sgl->word3); + /* + * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg + * list bdes. Initialize the first two and leave the rest for + * queuecommand. + */ + bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd)); + bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd)); + bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd); + bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w); + + /* Setup the physical region for the FCP RSP */ + bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp)); + bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp)); + bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp); + bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64; + bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w); - /* - * Since the IOCB for the FCP I/O is built into this - * lpfc_scsi_buf, initialize it with all known data now. - */ - iocb = &psb->cur_iocbq.iocb; - iocb->un.fcpi64.bdl.ulpIoTag32 = 0; - iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64; - /* setting the BLP size to 2 * sizeof BDE may not be correct. - * We are setting the bpl to point to out sgl. 
An sgl's
- * entries are 16 bytes, a bpl entries are 12 bytes.
- */
+	/*
+	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
+	 * initialize it with all known data now.
+	 */
+	iocb = &psb->cur_iocbq.iocb;
+	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+	if ((phba->sli_rev == 3) &&
+			!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
+		/* fill in immediate fcp command BDE */
+		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
-		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
-		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
+		iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
+				unsli3.fcp_ext.icd);
+		iocb->un.fcpi64.bdl.addrHigh = 0;
+		iocb->ulpBdeCount = 0;
+		iocb->ulpLe = 0;
+		/* fill in response BDE */
+		iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
+				sizeof(struct fcp_rsp);
+		iocb->unsli3.fcp_ext.rbde.addrLow =
+				putPaddrLow(pdma_phys_fcp_rsp);
+		iocb->unsli3.fcp_ext.rbde.addrHigh =
+				putPaddrHigh(pdma_phys_fcp_rsp);
+	} else {
+		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+		iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
+		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
+		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
-		iocb->ulpClass = CLASS3;
-		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
-			pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
-		else
-			pdma_phys_bpl1 = 0;
-		psb->dma_phys_bpl = pdma_phys_bpl;
-		phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
-		if (non_sequential_xri) {
-			status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
-					pdma_phys_bpl1,
-					psb->cur_iocbq.sli4_xritag);
-			if (status) {
-				/* Put this back on the abort scsi list */
-				psb->status = IOSTAT_LOCAL_REJECT;
-				psb->result = IOERR_ABORT_REQUESTED;
-				rc++;
-			} else
-				psb->status = IOSTAT_SUCCESS;
-			/* Put it back into the SCSI buffer list */
-			lpfc_release_scsi_buf_s4(phba, psb);
-			break;
-		}
-	}
-	if (bcnt) {
-		status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
-		/* Reset SCSI buffer count for next round of posting */
-		while (!list_empty(&sblist)) {
-			list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
-					 list);
-			if (status) {
-				/* Put this back on the abort scsi list */
-				psb->status = IOSTAT_LOCAL_REJECT;
-				psb->result = IOERR_ABORT_REQUESTED;
-				rc++;
-			} else
-				psb->status = IOSTAT_SUCCESS;
-			/* Put it back into the SCSI buffer list */
-			lpfc_release_scsi_buf_s4(phba, psb);
-		}
	}
+	iocb->ulpClass = CLASS3;
-	return bcnt + non_sequential_xri - rc;
+	return psb;
}
/**
- * lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
- * @vport: The virtual port for which this call being executed.
- * @num_to_allocate: The requested number of buffers to allocate.
- *
- * This routine wraps the actual SCSI buffer allocator function pointer from
- * the lpfc_hba struct.
- *
- * Return codes:
- *   int - number of scsi buffers that were allocated.
- *   0 = failure, less than num_to_alloc is a partial failure.
- **/
-static inline int
-lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
-{
-	return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
-}
-
-/**
- * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
- * @phba: The HBA for which this call is being executed.
+ * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba
+ * @phba: The Hba for which this call is being executed.
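+ *
+ * A minimal usage sketch (the caller code here is illustrative, not taken
+ * verbatim from this driver): take a buffer, fail back to the midlayer
+ * when the list is empty, and return it when the I/O completes:
+ *
+ *	lpfc_cmd = lpfc_get_scsi_buf(phba);
+ *	if (lpfc_cmd == NULL)
+ *		return SCSI_MLQUEUE_HOST_BUSY;
+ *	... build and issue the FCP command ...
+ *	lpfc_release_scsi_buf(phba, lpfc_cmd);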
 *
 * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
 * and returns to caller.
@@ -890,7 +591,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
}
/**
- * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
+ * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
@@ -898,7 +599,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
 * lpfc_scsi_buf_list list.
 **/
static void
-lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;
@@ -909,69 +610,21 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
}
/**
- * lpfc_release_scsi_buf_s4: Return a scsi buffer back to hba scsi buf list.
- * @phba: The Hba for which this call is being executed.
- * @psb: The scsi buffer which is being released.
- *
- * This routine releases @psb scsi buffer by adding it to tail of @phba
- * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
- * and cannot be reused for at least RA_TOV amount of time if it was
- * aborted.
- **/
-static void
-lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
-{
-	unsigned long iflag = 0;
-
-	if (psb->status == IOSTAT_LOCAL_REJECT
-		&& psb->result == IOERR_ABORT_REQUESTED) {
-		spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
-					iflag);
-		psb->pCmd = NULL;
-		list_add_tail(&psb->list,
-			&phba->sli4_hba.lpfc_abts_scsi_buf_list);
-		spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
-					iflag);
-	} else {
-
-		spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
-		psb->pCmd = NULL;
-		list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
-		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
-	}
-}
-
-/**
- * lpfc_release_scsi_buf: Return a scsi buffer back to hba scsi buf list.
- * @phba: The Hba for which this call is being executed.
- * @psb: The scsi buffer which is being released.
- *
- * This routine releases @psb scsi buffer by adding it to tail of @phba
- * lpfc_scsi_buf_list list.
- **/
-static void
-lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
-{
-
-	phba->lpfc_release_scsi_buf(phba, psb);
-}
-
-/**
- * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
+ * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
- * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
- * through sg elements and format the bdea. This routine also initializes all
- * IOCB fields which are dependent on scsi command request buffer.
+ * field of @lpfc_cmd. This routine scans through sg elements and formats the
+ * bdes. This routine also initializes all IOCB fields which are dependent on
+ * scsi command request buffer.
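+ *
+ * The data BDEs are filled in a loop of roughly this shape (a sketch;
+ * variable names are illustrative and the setup and limit checks are
+ * elided, but the field usage matches struct ulp_bde64 above):
+ *
+ *	scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
+ *		physaddr = sg_dma_address(sgel);
+ *		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+ *		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+ *		bpl->tus.w = le32_to_cpu(bpl->tus.w);
+ *		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
+ *		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
+ *		bpl++;
+ *	}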
* * Return codes: * 1 - Error * 0 - Success **/ static int -lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) { struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; struct scatterlist *sgel = NULL; @@ -1758,133 +1411,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, return ret; } -/** - * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec - * @phba: The Hba for which this call is being executed. - * @lpfc_cmd: The scsi buffer which is going to be mapped. - * - * This routine does the pci dma mapping for scatter-gather list of scsi cmnd - * field of @lpfc_cmd for device with SLI-4 interface spec. - * - * Return codes: - * 1 - Error - * 0 - Success - **/ -static int -lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) -{ - struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd; - struct scatterlist *sgel = NULL; - struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; - struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl; - IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; - dma_addr_t physaddr; - uint32_t num_bde = 0; - uint32_t dma_len; - uint32_t dma_offset = 0; - int nseg; - - /* - * There are three possibilities here - use scatter-gather segment, use - * the single mapping, or neither. Start the lpfc command prep by - * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first - * data bde entry. - */ - if (scsi_sg_count(scsi_cmnd)) { - /* - * The driver stores the segment count returned from pci_map_sg - * because this a count of dma-mappings used to map the use_sg - * pages. They are not guaranteed to be the same for those - * architectures that implement an IOMMU. - */ - - nseg = scsi_dma_map(scsi_cmnd); - if (unlikely(!nseg)) - return 1; - sgl += 1; - /* clear the last flag in the fcp_rsp map entry */ - sgl->word2 = le32_to_cpu(sgl->word2); - bf_set(lpfc_sli4_sge_last, sgl, 0); - sgl->word2 = cpu_to_le32(sgl->word2); - sgl += 1; - - lpfc_cmd->seg_cnt = nseg; - if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - printk(KERN_ERR "%s: Too many sg segments from " - "dma_map_sg. Config %d, seg_cnt %d\n", - __func__, phba->cfg_sg_seg_cnt, - lpfc_cmd->seg_cnt); - scsi_dma_unmap(scsi_cmnd); - return 1; - } - - /* - * The driver established a maximum scatter-gather segment count - * during probe that limits the number of sg elements in any - * single scsi command. Just run through the seg_cnt and format - * the sge's. - * When using SLI-3 the driver will try to fit all the BDEs into - * the IOCB. If it can't then the BDEs get added to a BPL as it - * does for SLI-2 mode. - */ - scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { - physaddr = sg_dma_address(sgel); - dma_len = sg_dma_len(sgel); - bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel)); - sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); - sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); - if ((num_bde + 1) == nseg) - bf_set(lpfc_sli4_sge_last, sgl, 1); - else - bf_set(lpfc_sli4_sge_last, sgl, 0); - bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); - sgl->word2 = cpu_to_le32(sgl->word2); - sgl->word3 = cpu_to_le32(sgl->word3); - dma_offset += dma_len; - sgl++; - } - } else { - sgl += 1; - /* clear the last flag in the fcp_rsp map entry */ - sgl->word2 = le32_to_cpu(sgl->word2); - bf_set(lpfc_sli4_sge_last, sgl, 1); - sgl->word2 = cpu_to_le32(sgl->word2); - } - - /* - * Finish initializing those IOCB fields that are dependent on the - * scsi_cmnd request_buffer. 
Note that for SLI-2 the bdeSize is - * explicitly reinitialized. - * all iocb memory resources are reused. - */ - fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd)); - - /* - * Due to difference in data length between DIF/non-DIF paths, - * we need to set word 4 of IOCB here - */ - iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); - return 0; -} - -/** - * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer - * @phba: The Hba for which this call is being executed. - * @lpfc_cmd: The scsi buffer which is going to be mapped. - * - * This routine wraps the actual DMA mapping function pointer from the - * lpfc_hba struct. - * - * Return codes: - * 1 - Error - * 0 - Success - **/ -static inline int -lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) -{ - return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); -} - /** * lpfc_send_scsi_error_event - Posts an event when there is SCSI error * @phba: Pointer to hba context object. @@ -1978,15 +1504,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, } /** - * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev - * @phba: The HBA for which this call is being executed. + * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather + * @phba: The Hba for which this call is being executed. * @psb: The scsi buffer which is going to be un-mapped. * * This routine does DMA un-mapping of scatter gather list of scsi command - * field of @lpfc_cmd for device with SLI-3 interface spec. + * field of @lpfc_cmd. **/ static void -lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) +lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb) { /* * There are only two special cases to consider. (1) the scsi command @@ -2002,36 +1528,6 @@ lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) psb->pCmd->sc_data_direction); } -/** - * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev - * @phba: The Hba for which this call is being executed. - * @psb: The scsi buffer which is going to be un-mapped. - * - * This routine does DMA un-mapping of scatter gather list of scsi command - * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to - * remove the sgl for this scsi buffer then we will do it here. For now - * we should be able to just call the sli3 unprep routine. - **/ -static void -lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) -{ - lpfc_scsi_unprep_dma_buf_s3(phba, psb); -} - -/** - * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list - * @phba: The Hba for which this call is being executed. - * @psb: The scsi buffer which is going to be un-mapped. - * - * This routine does DMA un-mapping of scatter gather list of scsi command - * field of @lpfc_cmd for device with SLI-4 interface spec. - **/ -static void -lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) -{ - phba->lpfc_scsi_unprep_dma_buf(phba, psb); -} - /** * lpfc_handler_fcp_err - FCP response handler * @vport: The virtual port for which this call is being executed. @@ -2180,7 +1676,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine * @phba: The Hba for which this call is being executed. * @pIocbIn: The command IOCBQ for the scsi cmnd. - * @pIocbOut: The response IOCBQ for the scsi cmnd. 
+ * @pIocbOut: The response IOCBQ for the scsi cmnd . * * This routine assigns scsi command result by looking into response IOCB * status field appropriately. This routine handles QUEUE FULL condition as @@ -2461,16 +1957,16 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd) } /** - * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP infor unit for SLI3 dev + * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: The scsi command which needs to send. * @pnode: Pointer to lpfc_nodelist. * * This routine initializes fcp_cmnd and iocb data structure from scsi command - * to transfer for device with SLI3 interface spec. + * to transfer. **/ static void -lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, +lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_nodelist *pnode) { struct lpfc_hba *phba = vport->phba; @@ -2517,11 +2013,8 @@ lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, if (scsi_sg_count(scsi_cmnd)) { if (datadir == DMA_TO_DEVICE) { iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; - if (phba->sli_rev < LPFC_SLI_REV4) { - iocb_cmd->un.fcpi.fcpi_parm = 0; - iocb_cmd->ulpPU = 0; - } else - iocb_cmd->ulpPU = PARM_READ_CHECK; + iocb_cmd->un.fcpi.fcpi_parm = 0; + iocb_cmd->ulpPU = 0; fcp_cmnd->fcpCntl3 = WRITE_DATA; phba->fc4OutputRequests++; } else { @@ -2558,60 +2051,20 @@ lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, } /** - * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP infor unit for SLI4 dev - * @vport: The virtual port for which this call is being executed. - * @lpfc_cmd: The scsi command which needs to send. - * @pnode: Pointer to lpfc_nodelist. - * - * This routine initializes fcp_cmnd and iocb data structure from scsi command - * to transfer for device with SLI4 interface spec. - **/ -static void -lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, - struct lpfc_nodelist *pnode) -{ - /* - * The prep cmnd routines do not touch the sgl or its - * entries. We may not have to do anything different. - * I will leave this function in place until we can - * run some IO through the driver and determine if changes - * are needed. - */ - return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode); -} - -/** - * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit - * @vport: The virtual port for which this call is being executed. - * @lpfc_cmd: The scsi command which needs to send. - * @pnode: Pointer to lpfc_nodelist. - * - * This routine wraps the actual convert SCSI cmnd function pointer from - * the lpfc_hba struct. - **/ -static inline void -lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, - struct lpfc_nodelist *pnode) -{ - vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode); -} - -/** - * lpfc_scsi_prep_task_mgmt_cmnd_s3 - Convert SLI3 scsi TM cmd to FCP info unit + * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit * @vport: The virtual port for which this call is being executed. * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. * @lun: Logical unit number. * @task_mgmt_cmd: SCSI task management command. * - * This routine creates FCP information unit corresponding to @task_mgmt_cmd - * for device with SLI-3 interface spec. + * This routine creates FCP information unit corresponding to @task_mgmt_cmd. 
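+ *
+ * The core of the conversion is small; as a sketch (validation of the
+ * rport data and the rest of the IOCB setup are elided here):
+ *
+ *	int_to_scsilun(lun, &lpfc_cmd->fcp_cmnd->fcp_lun);
+ *	lpfc_cmd->fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
+ *	lpfc_cmd->cur_iocbq.iocb.ulpCommand = CMD_FCP_ICMND64_CR;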
* * Return codes: * 0 - Error * 1 - Success **/ static int -lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, +lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, unsigned int lun, uint8_t task_mgmt_cmd) @@ -2660,107 +2113,6 @@ lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport, return 1; } -/** - * lpfc_scsi_prep_task_mgmt_cmnd_s4 - Convert SLI4 scsi TM cmd to FCP info unit - * @vport: The virtual port for which this call is being executed. - * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. - * @lun: Logical unit number. - * @task_mgmt_cmd: SCSI task management command. - * - * This routine creates FCP information unit corresponding to @task_mgmt_cmd - * for device with SLI-4 interface spec. - * - * Return codes: - * 0 - Error - * 1 - Success - **/ -static int -lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport, - struct lpfc_scsi_buf *lpfc_cmd, - unsigned int lun, - uint8_t task_mgmt_cmd) -{ - /* - * The prep cmnd routines do not touch the sgl or its - * entries. We may not have to do anything different. - * I will leave this function in place until we can - * run some IO through the driver and determine if changes - * are needed. - */ - return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun, - task_mgmt_cmd); -} - -/** - * lpfc_scsi_prep_task_mgmt_cmnd - Wrapper func convert scsi TM cmd to FCP info - * @vport: The virtual port for which this call is being executed. - * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. - * @lun: Logical unit number. - * @task_mgmt_cmd: SCSI task management command. - * - * This routine wraps the actual convert SCSI TM to FCP information unit - * function pointer from the lpfc_hba struct. - * - * Return codes: - * 0 - Error - * 1 - Success - **/ -static inline int -lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport, - struct lpfc_scsi_buf *lpfc_cmd, - unsigned int lun, - uint8_t task_mgmt_cmd) -{ - struct lpfc_hba *phba = vport->phba; - - return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun, - task_mgmt_cmd); -} - -/** - * lpfc_scsi_api_table_setup - Set up scsi api fucntion jump table - * @phba: The hba struct for which this call is being executed. - * @dev_grp: The HBA PCI-Device group number. - * - * This routine sets up the SCSI interface API function jump table in @phba - * struct. - * Returns: 0 - success, -ENODEV - failure. 
- **/ -int -lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) -{ - - switch (dev_grp) { - case LPFC_PCI_DEV_LP: - phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3; - phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3; - phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3; - phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3; - phba->lpfc_scsi_prep_task_mgmt_cmd = - lpfc_scsi_prep_task_mgmt_cmd_s3; - phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3; - break; - case LPFC_PCI_DEV_OC: - phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4; - phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4; - phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4; - phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4; - phba->lpfc_scsi_prep_task_mgmt_cmd = - lpfc_scsi_prep_task_mgmt_cmd_s4; - phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4; - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1418 Invalid HBA PCI-device group: 0x%x\n", - dev_grp); - return -ENODEV; - break; - } - phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf; - phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth; - return 0; -} - /** * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command * @phba: The Hba for which this call is being executed. @@ -2826,8 +2178,9 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport, lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0702 Issue Target Reset to TGT %d Data: x%x x%x\n", tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); - status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, - iocbq, iocbqrsp, lpfc_cmd->timeout); + status = lpfc_sli_issue_iocb_wait(phba, + &phba->sli.ring[phba->sli.fcp_ring], + iocbq, iocbqrsp, lpfc_cmd->timeout); if (status != IOCB_SUCCESS) { if (status == IOCB_TIMEDOUT) { iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; @@ -2952,6 +2305,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) struct Scsi_Host *shost = cmnd->device->host; struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; + struct lpfc_sli *psli = &phba->sli; struct lpfc_rport_data *rdata = cmnd->device->hostdata; struct lpfc_nodelist *ndlp = rdata->pnode; struct lpfc_scsi_buf *lpfc_cmd; @@ -3073,7 +2427,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); atomic_inc(&ndlp->cmd_pending); - err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, + err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring], &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB); if (err) { atomic_dec(&ndlp->cmd_pending); @@ -3136,6 +2490,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) struct Scsi_Host *shost = cmnd->device->host; struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; + struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring]; struct lpfc_iocbq *iocb; struct lpfc_iocbq *abtsiocb; struct lpfc_scsi_buf *lpfc_cmd; @@ -3176,10 +2531,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) icmd = &abtsiocb->iocb; icmd->un.acxri.abortType = ABORT_TYPE_ABTS; icmd->un.acxri.abortContextTag = cmd->ulpContext; - if (phba->sli_rev == LPFC_SLI_REV4) - icmd->un.acxri.abortIoTag = iocb->sli4_xritag; - else - icmd->un.acxri.abortIoTag = cmd->ulpIoTag; + icmd->un.acxri.abortIoTag = cmd->ulpIoTag; icmd->ulpLe = 1; icmd->ulpClass = cmd->ulpClass; @@ -3190,8 +2542,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) abtsiocb->iocb_cmpl = 
lpfc_sli_abort_fcp_cmpl; abtsiocb->vport = vport; - if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == - IOCB_ERROR) { + if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) { lpfc_sli_release_iocbq(phba, abtsiocb); ret = FAILED; goto out; @@ -3317,7 +2668,8 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) "0703 Issue target reset to TGT %d LUN %d " "rpi x%x nlp_flag x%x\n", cmnd->device->id, cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag); - status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, + status = lpfc_sli_issue_iocb_wait(phba, + &phba->sli.ring[phba->sli.fcp_ring], iocbq, iocbqrsp, lpfc_cmd->timeout); if (status == IOCB_TIMEDOUT) { iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl; @@ -3473,10 +2825,11 @@ lpfc_slave_alloc(struct scsi_device *sdev) { struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata; struct lpfc_hba *phba = vport->phba; + struct lpfc_scsi_buf *scsi_buf = NULL; struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); - uint32_t total = 0; + uint32_t total = 0, i; uint32_t num_to_alloc = 0; - int num_allocated = 0; + unsigned long flags; if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; @@ -3510,13 +2863,20 @@ lpfc_slave_alloc(struct scsi_device *sdev) (phba->cfg_hba_queue_depth - total)); num_to_alloc = phba->cfg_hba_queue_depth - total; } - num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); - if (num_to_alloc != num_allocated) { - lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, - "0708 Allocation request of %d " - "command buffers did not succeed. " - "Allocated %d buffers.\n", - num_to_alloc, num_allocated); + + for (i = 0; i < num_to_alloc; i++) { + scsi_buf = lpfc_new_scsi_buf(vport); + if (!scsi_buf) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0706 Failed to allocate " + "command buffer\n"); + break; + } + + spin_lock_irqsave(&phba->scsi_buf_list_lock, flags); + phba->total_scsi_bufs++; + list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list); + spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags); } return 0; } diff --git a/trunk/drivers/scsi/lpfc/lpfc_scsi.h b/trunk/drivers/scsi/lpfc/lpfc_scsi.h index 65dfc8bd5b49..c7c440d5fa29 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_scsi.h +++ b/trunk/drivers/scsi/lpfc/lpfc_scsi.h @@ -140,8 +140,6 @@ struct lpfc_scsi_buf { struct fcp_rsp *fcp_rsp; struct ulp_bde64 *fcp_bpl; - dma_addr_t dma_phys_bpl; - /* cur_iocbq has phys of the dma-able buffer. * Iotag is in here */ diff --git a/trunk/drivers/scsi/lpfc/lpfc_sli.c b/trunk/drivers/scsi/lpfc/lpfc_sli.c index ff04daf18f48..eb5c75c45ba4 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_sli.c +++ b/trunk/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@ /******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * - * Copyright (C) 2004-2009 Emulex. All rights reserved. * + * Copyright (C) 2004-2008 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. 
* * www.emulex.com * * Portions Copyright (C) 2004-2005 Christoph Hellwig * @@ -29,12 +29,9 @@ #include #include #include -#include -#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" -#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -43,7 +40,24 @@ #include "lpfc_logmsg.h" #include "lpfc_compat.h" #include "lpfc_debugfs.h" -#include "lpfc_vport.h" + +/* + * Define macro to log: Mailbox command x%x cannot issue Data + * This allows multiple uses of lpfc_msgBlk0311 + * w/o perturbing log msg utility. + */ +#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \ + lpfc_printf_log(phba, \ + KERN_INFO, \ + LOG_MBOX | LOG_SLI, \ + "(%d):0311 Mailbox command x%x cannot " \ + "issue Data: x%x x%x x%x\n", \ + pmbox->vport ? pmbox->vport->vpi : 0, \ + pmbox->mb.mbxCommand, \ + phba->pport->port_state, \ + psli->sli_flag, \ + flag) + /* There are only four IOCB completion types. */ typedef enum _lpfc_iocb_type { @@ -53,350 +67,6 @@ typedef enum _lpfc_iocb_type { LPFC_ABORT_IOCB } lpfc_iocb_type; - -/* Provide function prototypes local to this module. */ -static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *, - uint32_t); -static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *, - uint8_t *, uint32_t *); - -static IOCB_t * -lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) -{ - return &iocbq->iocb; -} - -/** - * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue - * @q: The Work Queue to operate on. - * @wqe: The work Queue Entry to put on the Work queue. - * - * This routine will copy the contents of @wqe to the next available entry on - * the @q. This function will then ring the Work Queue Doorbell to signal the - * HBA to start processing the Work Queue Entry. This function returns 0 if - * successful. If no entries are available on @q then this function will return - * -ENOMEM. - * The caller is expected to hold the hbalock when calling this routine. - **/ -static uint32_t -lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) -{ - union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe; - struct lpfc_register doorbell; - uint32_t host_index; - - /* If the host has not yet processed the next entry then we are done */ - if (((q->host_index + 1) % q->entry_count) == q->hba_index) - return -ENOMEM; - /* set consumption flag every once in a while */ - if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL)) - bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1); - - lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); - - /* Update the host index before invoking device */ - host_index = q->host_index; - q->host_index = ((q->host_index + 1) % q->entry_count); - - /* Ring Doorbell */ - doorbell.word0 = 0; - bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1); - bf_set(lpfc_wq_doorbell_index, &doorbell, host_index); - bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id); - writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr); - readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */ - - return 0; -} - -/** - * lpfc_sli4_wq_release - Updates internal hba index for WQ - * @q: The Work Queue to operate on. - * @index: The index to advance the hba index to. - * - * This routine will update the HBA index of a queue to reflect consumption of - * Work Queue Entries by the HBA. When the HBA indicates that it has consumed - * an entry the host calls this function to update the queue's internal - * pointers. This routine returns the number of entries that were consumed by - * the HBA. 
- **/ -static uint32_t -lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index) -{ - uint32_t released = 0; - - if (q->hba_index == index) - return 0; - do { - q->hba_index = ((q->hba_index + 1) % q->entry_count); - released++; - } while (q->hba_index != index); - return released; -} - -/** - * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue - * @q: The Mailbox Queue to operate on. - * @wqe: The Mailbox Queue Entry to put on the Work queue. - * - * This routine will copy the contents of @mqe to the next available entry on - * the @q. This function will then ring the Work Queue Doorbell to signal the - * HBA to start processing the Work Queue Entry. This function returns 0 if - * successful. If no entries are available on @q then this function will return - * -ENOMEM. - * The caller is expected to hold the hbalock when calling this routine. - **/ -static uint32_t -lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) -{ - struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe; - struct lpfc_register doorbell; - uint32_t host_index; - - /* If the host has not yet processed the next entry then we are done */ - if (((q->host_index + 1) % q->entry_count) == q->hba_index) - return -ENOMEM; - lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size); - /* Save off the mailbox pointer for completion */ - q->phba->mbox = (MAILBOX_t *)temp_mqe; - - /* Update the host index before invoking device */ - host_index = q->host_index; - q->host_index = ((q->host_index + 1) % q->entry_count); - - /* Ring Doorbell */ - doorbell.word0 = 0; - bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1); - bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id); - writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr); - readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */ - return 0; -} - -/** - * lpfc_sli4_mq_release - Updates internal hba index for MQ - * @q: The Mailbox Queue to operate on. - * - * This routine will update the HBA index of a queue to reflect consumption of - * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed - * an entry the host calls this function to update the queue's internal - * pointers. This routine returns the number of entries that were consumed by - * the HBA. - **/ -static uint32_t -lpfc_sli4_mq_release(struct lpfc_queue *q) -{ - /* Clear the mailbox pointer for completion */ - q->phba->mbox = NULL; - q->hba_index = ((q->hba_index + 1) % q->entry_count); - return 1; -} - -/** - * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ - * @q: The Event Queue to get the first valid EQE from - * - * This routine will get the first valid Event Queue Entry from @q, update - * the queue's internal hba index, and return the EQE. If no valid EQEs are in - * the Queue (no more work to do), or the Queue is full of EQEs that have been - * processed, but not popped back to the HBA then this routine will return NULL. - **/ -static struct lpfc_eqe * -lpfc_sli4_eq_get(struct lpfc_queue *q) -{ - struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe; - - /* If the next EQE is not valid then we are done */ - if (!bf_get(lpfc_eqe_valid, eqe)) - return NULL; - /* If the host has not yet processed the next entry then we are done */ - if (((q->hba_index + 1) % q->entry_count) == q->host_index) - return NULL; - - q->hba_index = ((q->hba_index + 1) % q->entry_count); - return eqe; -} - -/** - * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ - * @q: The Event Queue that the host has completed processing for. 
- * @arm: Indicates whether the host wants to arms this CQ. - * - * This routine will mark all Event Queue Entries on @q, from the last - * known completed entry to the last entry that was processed, as completed - * by clearing the valid bit for each completion queue entry. Then it will - * notify the HBA, by ringing the doorbell, that the EQEs have been processed. - * The internal host index in the @q will be updated by this routine to indicate - * that the host has finished processing the entries. The @arm parameter - * indicates that the queue should be rearmed when ringing the doorbell. - * - * This function will return the number of EQEs that were popped. - **/ -uint32_t -lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm) -{ - uint32_t released = 0; - struct lpfc_eqe *temp_eqe; - struct lpfc_register doorbell; - - /* while there are valid entries */ - while (q->hba_index != q->host_index) { - temp_eqe = q->qe[q->host_index].eqe; - bf_set(lpfc_eqe_valid, temp_eqe, 0); - released++; - q->host_index = ((q->host_index + 1) % q->entry_count); - } - if (unlikely(released == 0 && !arm)) - return 0; - - /* ring doorbell for number popped */ - doorbell.word0 = 0; - if (arm) { - bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); - bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1); - } - bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); - bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT); - bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id); - writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); - return released; -} - -/** - * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ - * @q: The Completion Queue to get the first valid CQE from - * - * This routine will get the first valid Completion Queue Entry from @q, update - * the queue's internal hba index, and return the CQE. If no valid CQEs are in - * the Queue (no more work to do), or the Queue is full of CQEs that have been - * processed, but not popped back to the HBA then this routine will return NULL. - **/ -static struct lpfc_cqe * -lpfc_sli4_cq_get(struct lpfc_queue *q) -{ - struct lpfc_cqe *cqe; - - /* If the next CQE is not valid then we are done */ - if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe)) - return NULL; - /* If the host has not yet processed the next entry then we are done */ - if (((q->hba_index + 1) % q->entry_count) == q->host_index) - return NULL; - - cqe = q->qe[q->hba_index].cqe; - q->hba_index = ((q->hba_index + 1) % q->entry_count); - return cqe; -} - -/** - * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ - * @q: The Completion Queue that the host has completed processing for. - * @arm: Indicates whether the host wants to arms this CQ. - * - * This routine will mark all Completion queue entries on @q, from the last - * known completed entry to the last entry that was processed, as completed - * by clearing the valid bit for each completion queue entry. Then it will - * notify the HBA, by ringing the doorbell, that the CQEs have been processed. - * The internal host index in the @q will be updated by this routine to indicate - * that the host has finished processing the entries. The @arm parameter - * indicates that the queue should be rearmed when ringing the doorbell. - * - * This function will return the number of CQEs that were released. 
- **/ -uint32_t -lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm) -{ - uint32_t released = 0; - struct lpfc_cqe *temp_qe; - struct lpfc_register doorbell; - - /* while there are valid entries */ - while (q->hba_index != q->host_index) { - temp_qe = q->qe[q->host_index].cqe; - bf_set(lpfc_cqe_valid, temp_qe, 0); - released++; - q->host_index = ((q->host_index + 1) % q->entry_count); - } - if (unlikely(released == 0 && !arm)) - return 0; - - /* ring doorbell for number popped */ - doorbell.word0 = 0; - if (arm) - bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1); - bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released); - bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION); - bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id); - writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr); - return released; -} - -/** - * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue - * @q: The Header Receive Queue to operate on. - * @wqe: The Receive Queue Entry to put on the Receive queue. - * - * This routine will copy the contents of @wqe to the next available entry on - * the @q. This function will then ring the Receive Queue Doorbell to signal the - * HBA to start processing the Receive Queue Entry. This function returns the - * index that the rqe was copied to if successful. If no entries are available - * on @q then this function will return -ENOMEM. - * The caller is expected to hold the hbalock when calling this routine. - **/ -static int -lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, - struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe) -{ - struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe; - struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe; - struct lpfc_register doorbell; - int put_index = hq->host_index; - - if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ) - return -EINVAL; - if (hq->host_index != dq->host_index) - return -EINVAL; - /* If the host has not yet processed the next entry then we are done */ - if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index) - return -EBUSY; - lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size); - lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size); - - /* Update the host index to point to the next slot */ - hq->host_index = ((hq->host_index + 1) % hq->entry_count); - dq->host_index = ((dq->host_index + 1) % dq->entry_count); - - /* Ring The Header Receive Queue Doorbell */ - if (!(hq->host_index % LPFC_RQ_POST_BATCH)) { - doorbell.word0 = 0; - bf_set(lpfc_rq_doorbell_num_posted, &doorbell, - LPFC_RQ_POST_BATCH); - bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id); - writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr); - } - return put_index; -} - -/** - * lpfc_sli4_rq_release - Updates internal hba index for RQ - * @q: The Header Receive Queue to operate on. - * - * This routine will update the HBA index of a queue to reflect consumption of - * one Receive Queue Entry by the HBA. When the HBA indicates that it has - * consumed an entry the host calls this function to update the queue's - * internal pointers. This routine returns the number of entries that were - * consumed by the HBA. 
- **/ -static uint32_t -lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq) -{ - if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ)) - return 0; - hq->hba_index = ((hq->hba_index + 1) % hq->entry_count); - dq->hba_index = ((dq->hba_index + 1) % dq->entry_count); - return 1; -} - /** * lpfc_cmd_iocb - Get next command iocb entry in the ring * @phba: Pointer to HBA context object. @@ -450,76 +120,6 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba) return iocbq; } -/** - * __lpfc_clear_active_sglq - Remove the active sglq for this XRI. - * @phba: Pointer to HBA context object. - * @xritag: XRI value. - * - * This function clears the sglq pointer from the array of acive - * sglq's. The xritag that is passed in is used to index into the - * array. Before the xritag can be used it needs to be adjusted - * by subtracting the xribase. - * - * Returns sglq ponter = success, NULL = Failure. - **/ -static struct lpfc_sglq * -__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag) -{ - uint16_t adj_xri; - struct lpfc_sglq *sglq; - adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; - if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri) - return NULL; - sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri]; - phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL; - return sglq; -} - -/** - * __lpfc_get_active_sglq - Get the active sglq for this XRI. - * @phba: Pointer to HBA context object. - * @xritag: XRI value. - * - * This function returns the sglq pointer from the array of acive - * sglq's. The xritag that is passed in is used to index into the - * array. Before the xritag can be used it needs to be adjusted - * by subtracting the xribase. - * - * Returns sglq ponter = success, NULL = Failure. - **/ -static struct lpfc_sglq * -__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag) -{ - uint16_t adj_xri; - struct lpfc_sglq *sglq; - adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base; - if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri) - return NULL; - sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri]; - return sglq; -} - -/** - * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool - * @phba: Pointer to HBA context object. - * - * This function is called with hbalock held. This function - * Gets a new driver sglq object from the sglq list. If the - * list is not empty then it is successful, it returns pointer to the newly - * allocated sglq object else it returns NULL. - **/ -static struct lpfc_sglq * -__lpfc_sli_get_sglq(struct lpfc_hba *phba) -{ - struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list; - struct lpfc_sglq *sglq = NULL; - uint16_t adj_xri; - list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list); - adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base; - phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq; - return sglq; -} - /** * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool * @phba: Pointer to HBA context object. @@ -542,60 +142,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba) } /** - * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool - * @phba: Pointer to HBA context object. - * @iocbq: Pointer to driver iocb object. - * - * This function is called with hbalock held to release driver - * iocb object to the iocb pool. The iotag in the iocb object - * does not change for each use of the iocb object. This function - * clears all other fields of the iocb object when it is freed. 
- * The sqlq structure that holds the xritag and phys and virtual - * mappings for the scatter gather list is retrieved from the - * active array of sglq. The get of the sglq pointer also clears - * the entry in the array. If the status of the IO indiactes that - * this IO was aborted then the sglq entry it put on the - * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the - * IO has good status or fails for any other reason then the sglq - * entry is added to the free list (lpfc_sgl_list). - **/ -static void -__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) -{ - struct lpfc_sglq *sglq; - size_t start_clean = offsetof(struct lpfc_iocbq, iocb); - unsigned long iflag; - - if (iocbq->sli4_xritag == NO_XRI) - sglq = NULL; - else - sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag); - if (sglq) { - if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED - || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT) - && (iocbq->iocb.un.ulpWord[4] - == IOERR_SLI_ABORTED))) { - spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, - iflag); - list_add(&sglq->list, - &phba->sli4_hba.lpfc_abts_els_sgl_list); - spin_unlock_irqrestore( - &phba->sli4_hba.abts_sgl_list_lock, iflag); - } else - list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list); - } - - - /* - * Clean all volatile data fields, preserve iotag and node struct. - */ - memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); - iocbq->sli4_xritag = NO_XRI; - list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); -} - -/** - * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool + * __lpfc_sli_release_iocbq - Release iocb to the iocb pool * @phba: Pointer to HBA context object. * @iocbq: Pointer to driver iocb object. * @@ -605,7 +152,7 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) * clears all other fields of the iocb object when it is freed. **/ static void -__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) +__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) { size_t start_clean = offsetof(struct lpfc_iocbq, iocb); @@ -613,26 +160,9 @@ __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) * Clean all volatile data fields, preserve iotag and node struct. */ memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean); - iocbq->sli4_xritag = NO_XRI; list_add_tail(&iocbq->list, &phba->lpfc_iocb_list); } -/** - * __lpfc_sli_release_iocbq - Release iocb to the iocb pool - * @phba: Pointer to HBA context object. - * @iocbq: Pointer to driver iocb object. - * - * This function is called with hbalock held to release driver - * iocb object to the iocb pool. The iotag in the iocb object - * does not change for each use of the iocb object. This function - * clears all other fields of the iocb object when it is freed. - **/ -static void -__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq) -{ - phba->__lpfc_sli_release_iocbq(phba, iocbq); -} - /** * lpfc_sli_release_iocbq - Release iocb to the iocb pool * @phba: Pointer to HBA context object. 
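/*
 * For reference, the retained release path's "clean all volatile fields"
 * idiom reduces to the following (a sketch built from the code above;
 * start_clean marks the first field that may be cleared, so the iotag
 * and list bookkeeping before it are preserved):
 *
 *	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
 *
 *	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
 *	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
 */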
@@ -751,14 +281,6 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd) case CMD_GEN_REQUEST64_CR: case CMD_GEN_REQUEST64_CX: case CMD_XMIT_ELS_RSP64_CX: - case DSSCMD_IWRITE64_CR: - case DSSCMD_IWRITE64_CX: - case DSSCMD_IREAD64_CR: - case DSSCMD_IREAD64_CX: - case DSSCMD_INVALIDATE_DEK: - case DSSCMD_SET_KEK: - case DSSCMD_GET_KEK_ID: - case DSSCMD_GEN_XFER: type = LPFC_SOL_IOCB; break; case CMD_ABORT_XRI_CN: @@ -826,7 +348,7 @@ lpfc_sli_ring_map(struct lpfc_hba *phba) pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmb) return -ENOMEM; - pmbox = &pmb->u.mb; + pmbox = &pmb->mb; phba->link_state = LPFC_INIT_MBX_CMDS; for (i = 0; i < psli->num_rings; i++) { lpfc_config_ring(phba, i, pmb); @@ -1257,8 +779,8 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) phba->hbqs[i].buffer_count = 0; } /* Return all HBQ buffer that are in-fly */ - list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list, - list) { + list_for_each_entry_safe(dmabuf, next_dmabuf, + &phba->hbqbuf_in_list, list) { hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf); list_del(&hbq_buf->dbuf.list); if (hbq_buf->tag == -1) { @@ -1292,27 +814,9 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba) * pointer to the hbq entry if it successfully post the buffer * else it will return NULL. **/ -static int +static struct lpfc_hbq_entry * lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno, struct hbq_dmabuf *hbq_buf) -{ - return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf); -} - -/** - * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware - * @phba: Pointer to HBA context object. - * @hbqno: HBQ number. - * @hbq_buf: Pointer to HBQ buffer. - * - * This function is called with the hbalock held to post a hbq buffer to the - * firmware. If the function finds an empty slot in the HBQ, it will post the - * buffer and place it on the hbq_buffer_list. The function will return zero if - * it successfully post the buffer else it will return an error. - **/ -static int -lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, - struct hbq_dmabuf *hbq_buf) { struct lpfc_hbq_entry *hbqe; dma_addr_t physaddr = hbq_buf->dbuf.phys; @@ -1334,40 +838,8 @@ lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno, /* flush */ readl(phba->hbq_put + hbqno); list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list); - return 0; - } else - return -ENOMEM; -} - -/** - * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware - * @phba: Pointer to HBA context object. - * @hbqno: HBQ number. - * @hbq_buf: Pointer to HBQ buffer. - * - * This function is called with the hbalock held to post an RQE to the SLI4 - * firmware. If able to post the RQE to the RQ it will queue the hbq entry to - * the hbq_buffer_list and return zero, otherwise it will return an error. - **/ -static int -lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno, - struct hbq_dmabuf *hbq_buf) -{ - int rc; - struct lpfc_rqe hrqe; - struct lpfc_rqe drqe; - - hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys); - hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys); - drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys); - drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys); - rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, - &hrqe, &drqe); - if (rc < 0) - return rc; - hbq_buf->tag = rc; - list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list); - return 0; + } + return hbqe; } /* HBQ for ELS and CT traffic. 
*/ @@ -1442,7 +914,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count) dbuf.list); hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count | (hbqno << 16)); - if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { + if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { phba->hbqs[hbqno].buffer_count++; posted++; } else @@ -1492,25 +964,6 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) lpfc_hbq_defs[qno]->init_count)); } -/** - * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list - * @phba: Pointer to HBA context object. - * @hbqno: HBQ number. - * - * This function removes the first hbq buffer on an hbq list and returns a - * pointer to that buffer. If it finds no buffers on the list it returns NULL. - **/ -static struct hbq_dmabuf * -lpfc_sli_hbqbuf_get(struct list_head *rb_list) -{ - struct lpfc_dmabuf *d_buf; - - list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list); - if (!d_buf) - return NULL; - return container_of(d_buf, struct hbq_dmabuf, dbuf); -} - /** * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag * @phba: Pointer to HBA context object. @@ -1532,15 +985,12 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag) if (hbqno >= LPFC_MAX_HBQS) return NULL; - spin_lock_irq(&phba->hbalock); list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) { hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf); if (hbq_buf->tag == tag) { - spin_unlock_irq(&phba->hbalock); return hbq_buf; } } - spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT, "1803 Bad hbq tag. Data: x%x x%x\n", tag, phba->hbqs[tag >> 16].buffer_count); @@ -1563,8 +1013,9 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer) if (hbq_buffer) { hbqno = hbq_buffer->tag >> 16; - if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) + if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) { (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer); + } } } @@ -1635,15 +1086,6 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) case MBX_HEARTBEAT: case MBX_PORT_CAPABILITIES: case MBX_PORT_IOV_CONTROL: - case MBX_SLI4_CONFIG: - case MBX_SLI4_REQ_FTRS: - case MBX_REG_FCFI: - case MBX_UNREG_FCFI: - case MBX_REG_VFI: - case MBX_UNREG_VFI: - case MBX_INIT_VPI: - case MBX_INIT_VFI: - case MBX_RESUME_RPI: ret = mbxCommand; break; default: @@ -1664,7 +1106,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand) * will wake up thread waiting on the wait queue pointed by context1 * of the mailbox. **/ -void +static void lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) { wait_queue_head_t *pdone_q; @@ -1698,7 +1140,7 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_dmabuf *mp; - uint16_t rpi, vpi; + uint16_t rpi; int rc; mp = (struct lpfc_dmabuf *) (pmb->context1); @@ -1708,30 +1150,24 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) kfree(mp); } - if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) && - (phba->sli_rev == LPFC_SLI_REV4)) - lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); - /* * If a REG_LOGIN succeeded after node is destroyed or node * is in re-discovery driver need to cleanup the RPI. 
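 * (The cleanup reuses the completed mailbox: the code below rebuilds it
 * as an UNREG_LOGIN for the same vpi/rpi and reissues it.)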
*/ if (!(phba->pport->load_flag & FC_UNLOADING) && - pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 && - !pmb->u.mb.mbxStatus) { - rpi = pmb->u.mb.un.varWords[0]; - vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base; - lpfc_unreg_login(phba, vpi, rpi, pmb); + pmb->mb.mbxCommand == MBX_REG_LOGIN64 && + !pmb->mb.mbxStatus) { + + rpi = pmb->mb.un.varWords[0]; + lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb); pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); if (rc != MBX_NOT_FINISHED) return; } - if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG) - lpfc_sli4_mbox_cmd_free(phba, pmb); - else - mempool_free(pmb, phba->mbox_mem_pool); + mempool_free(pmb, phba->mbox_mem_pool); + return; } /** @@ -1768,7 +1204,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) if (pmb == NULL) break; - pmbox = &pmb->u.mb; + pmbox = &pmb->mb; if (pmbox->mbxCommand != MBX_HEARTBEAT) { if (pmb->vport) { @@ -1797,10 +1233,9 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) /* Unknow mailbox command compl */ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "(%d):0323 Unknown Mailbox command " - "x%x (x%x) Cmpl\n", + "%x Cmpl\n", pmb->vport ? pmb->vport->vpi : 0, - pmbox->mbxCommand, - lpfc_sli4_mbox_opcode_get(phba, pmb)); + pmbox->mbxCommand); phba->link_state = LPFC_HBA_ERROR; phba->work_hs = HS_FFER3; lpfc_handle_eratt(phba); @@ -1815,29 +1250,29 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) LOG_MBOX | LOG_SLI, "(%d):0305 Mbox cmd cmpl " "error - RETRYing Data: x%x " - "(x%x) x%x x%x x%x\n", + "x%x x%x x%x\n", pmb->vport ? pmb->vport->vpi :0, pmbox->mbxCommand, - lpfc_sli4_mbox_opcode_get(phba, - pmb), pmbox->mbxStatus, pmbox->un.varWords[0], pmb->vport->port_state); pmbox->mbxStatus = 0; pmbox->mbxOwner = OWN_HOST; + spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + spin_unlock_irq(&phba->hbalock); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); - if (rc != MBX_NOT_FINISHED) + if (rc == MBX_SUCCESS) continue; } } /* Mailbox cmd Cmpl */ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p " + "(%d):0307 Mailbox cmd x%x Cmpl x%p " "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", pmb->vport ? pmb->vport->vpi : 0, pmbox->mbxCommand, - lpfc_sli4_mbox_opcode_get(phba, pmb), pmb->mbox_cmpl, *((uint32_t *) pmbox), pmbox->un.varWords[0], @@ -1882,45 +1317,6 @@ lpfc_sli_get_buff(struct lpfc_hba *phba, return &hbq_entry->dbuf; } -/** - * lpfc_complete_unsol_iocb - Complete an unsolicited sequence - * @phba: Pointer to HBA context object. - * @pring: Pointer to driver SLI ring object. - * @saveq: Pointer to the iocbq struct representing the sequence starting frame. - * @fch_r_ctl: the r_ctl for the first frame of the sequence. - * @fch_type: the type for the first frame of the sequence. - * - * This function is called with no lock held. This function uses the r_ctl and - * type of the received sequence to find the correct callback function to call - * to process the sequence. 
- **/ -static int -lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, - struct lpfc_iocbq *saveq, uint32_t fch_r_ctl, - uint32_t fch_type) -{ - int i; - - /* unSolicited Responses */ - if (pring->prt[0].profile) { - if (pring->prt[0].lpfc_sli_rcv_unsol_event) - (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, - saveq); - return 1; - } - /* We must search, based on rctl / type - for the right routine */ - for (i = 0; i < pring->num_mask; i++) { - if ((pring->prt[i].rctl == fch_r_ctl) && - (pring->prt[i].type == fch_type)) { - if (pring->prt[i].lpfc_sli_rcv_unsol_event) - (pring->prt[i].lpfc_sli_rcv_unsol_event) - (phba, pring, saveq); - return 1; - } - } - return 0; -} /** * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler @@ -1943,7 +1339,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, IOCB_t * irsp; WORD5 * w5p; uint32_t Rctl, Type; - uint32_t match; + uint32_t match, i; struct lpfc_iocbq *iocbq; struct lpfc_dmabuf *dmzbuf; @@ -2086,12 +1482,35 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } } - if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type)) - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "0313 Ring %d handler: unexpected Rctl x%x " - "Type x%x received\n", - pring->ringno, Rctl, Type); - + /* unSolicited Responses */ + if (pring->prt[0].profile) { + if (pring->prt[0].lpfc_sli_rcv_unsol_event) + (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, + saveq); + match = 1; + } else { + /* We must search, based on rctl / type + for the right routine */ + for (i = 0; i < pring->num_mask; i++) { + if ((pring->prt[i].rctl == Rctl) + && (pring->prt[i].type == Type)) { + if (pring->prt[i].lpfc_sli_rcv_unsol_event) + (pring->prt[i].lpfc_sli_rcv_unsol_event) + (phba, pring, saveq); + match = 1; + break; + } + } + } + if (match == 0) { + /* Unexpected Rctl / Type received */ + /* Ring handler: unexpected + Rctl Type received */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0313 Ring %d handler: unexpected Rctl x%x " + "Type x%x received\n", + pring->ringno, Rctl, Type); + } return 1; } @@ -2132,37 +1551,6 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba, return NULL; } -/** - * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag - * @phba: Pointer to HBA context object. - * @pring: Pointer to driver SLI ring object. - * @iotag: IOCB tag. - * - * This function looks up the iocb_lookup table to get the command iocb - * corresponding to the given iotag. This function is called with the - * hbalock held. - * This function returns the command iocb object if it finds the command - * iocb else returns NULL. - **/ -static struct lpfc_iocbq * -lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba, - struct lpfc_sli_ring *pring, uint16_t iotag) -{ - struct lpfc_iocbq *cmd_iocb; - - if (iotag != 0 && iotag <= phba->sli.last_iotag) { - cmd_iocb = phba->sli.iocbq_lookup[iotag]; - list_del_init(&cmd_iocb->list); - pring->txcmplq_cnt--; - return cmd_iocb; - } - - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0372 iotag x%x is out off range: max iotag (x%x)\n", - iotag, phba->sli.last_iotag); - return NULL; -} - /** * lpfc_sli_process_sol_iocb - process solicited iocb completion * @phba: Pointer to HBA context object. 
@@ -2566,7 +1954,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { spin_unlock_irqrestore(&phba->hbalock, iflag); - phba->lpfc_rampdown_queue_depth(phba); + lpfc_rampdown_queue_depth(phba); spin_lock_irqsave(&phba->hbalock, iflag); } @@ -2680,215 +2068,39 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, } /** - * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb - * @phba: Pointer to HBA context object. - * @pring: Pointer to driver SLI ring object. - * @rspiocbp: Pointer to driver response IOCB object. - * - * This function is called from the worker thread when there is a slow-path - * response IOCB to process. This function chains all the response iocbs until - * seeing the iocb with the LE bit set. The function will call - * lpfc_sli_process_sol_iocb function if the response iocb indicates a - * completion of a command iocb. The function will call the - * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb. - * The function frees the resources or calls the completion handler if this - * iocb is an abort completion. The function returns NULL when the response - * iocb has the LE bit set and all the chained iocbs are processed, otherwise - * this function shall chain the iocb on to the iocb_continueq and return the - * response iocb passed in. - **/ -static struct lpfc_iocbq * -lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, - struct lpfc_iocbq *rspiocbp) -{ - struct lpfc_iocbq *saveq; - struct lpfc_iocbq *cmdiocbp; - struct lpfc_iocbq *next_iocb; - IOCB_t *irsp = NULL; - uint32_t free_saveq; - uint8_t iocb_cmd_type; - lpfc_iocb_type type; - unsigned long iflag; - int rc; - - spin_lock_irqsave(&phba->hbalock, iflag); - /* First add the response iocb to the countinueq list */ - list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); - pring->iocb_continueq_cnt++; - - /* Now, determine whetehr the list is completed for processing */ - irsp = &rspiocbp->iocb; - if (irsp->ulpLe) { - /* - * By default, the driver expects to free all resources - * associated with this iocb completion. - */ - free_saveq = 1; - saveq = list_get_first(&pring->iocb_continueq, - struct lpfc_iocbq, list); - irsp = &(saveq->iocb); - list_del_init(&pring->iocb_continueq); - pring->iocb_continueq_cnt = 0; - - pring->stats.iocb_rsp++; - - /* - * If resource errors reported from HBA, reduce - * queuedepths of the SCSI device. - */ - if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && - (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { - spin_unlock_irqrestore(&phba->hbalock, iflag); - phba->lpfc_rampdown_queue_depth(phba); - spin_lock_irqsave(&phba->hbalock, iflag); - } - - if (irsp->ulpStatus) { - /* Rsp ring error: IOCB */ - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "0328 Rsp Ring %d error: " - "IOCB Data: " - "x%x x%x x%x x%x " - "x%x x%x x%x x%x " - "x%x x%x x%x x%x " - "x%x x%x x%x x%x\n", - pring->ringno, - irsp->un.ulpWord[0], - irsp->un.ulpWord[1], - irsp->un.ulpWord[2], - irsp->un.ulpWord[3], - irsp->un.ulpWord[4], - irsp->un.ulpWord[5], - *(((uint32_t *) irsp) + 6), - *(((uint32_t *) irsp) + 7), - *(((uint32_t *) irsp) + 8), - *(((uint32_t *) irsp) + 9), - *(((uint32_t *) irsp) + 10), - *(((uint32_t *) irsp) + 11), - *(((uint32_t *) irsp) + 12), - *(((uint32_t *) irsp) + 13), - *(((uint32_t *) irsp) + 14), - *(((uint32_t *) irsp) + 15)); - } - - /* - * Fetch the IOCB command type and call the correct completion - * routine. 
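The function being removed here is the factored-out form of a simple accumulate-then-dispatch machine: each response entry is queued on pring->iocb_continueq, and nothing is processed until an entry arrives with the LE (last entry) bit set, at which point the whole chain is handed off as one sequence. Its skeleton, pulled out as a fragment (declarations elided; list_get_first is the driver's own list macro):

        list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
        pring->iocb_continueq_cnt++;
        if (irsp->ulpLe) {                      /* sequence complete */
                saveq = list_get_first(&pring->iocb_continueq,
                                       struct lpfc_iocbq, list);
                list_del_init(&pring->iocb_continueq);
                pring->iocb_continueq_cnt = 0;
                /* classify saveq (sol/unsol/abort) and dispatch it, then
                 * release every chained iocbq unless the unsolicited
                 * handler kept ownership (free_saveq == 0) */
        }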
Solicited and Unsolicited IOCBs on the ELS ring - * get freed back to the lpfc_iocb_list by the discovery - * kernel thread. - */ - iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; - type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); - switch (type) { - case LPFC_SOL_IOCB: - spin_unlock_irqrestore(&phba->hbalock, iflag); - rc = lpfc_sli_process_sol_iocb(phba, pring, saveq); - spin_lock_irqsave(&phba->hbalock, iflag); - break; - - case LPFC_UNSOL_IOCB: - spin_unlock_irqrestore(&phba->hbalock, iflag); - rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq); - spin_lock_irqsave(&phba->hbalock, iflag); - if (!rc) - free_saveq = 0; - break; - - case LPFC_ABORT_IOCB: - cmdiocbp = NULL; - if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) - cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, - saveq); - if (cmdiocbp) { - /* Call the specified completion routine */ - if (cmdiocbp->iocb_cmpl) { - spin_unlock_irqrestore(&phba->hbalock, - iflag); - (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, - saveq); - spin_lock_irqsave(&phba->hbalock, - iflag); - } else - __lpfc_sli_release_iocbq(phba, - cmdiocbp); - } - break; - - case LPFC_UNKNOWN_IOCB: - if (irsp->ulpCommand == CMD_ADAPTER_MSG) { - char adaptermsg[LPFC_MAX_ADPTMSG]; - memset(adaptermsg, 0, LPFC_MAX_ADPTMSG); - memcpy(&adaptermsg[0], (uint8_t *)irsp, - MAX_MSG_DATA); - dev_warn(&((phba->pcidev)->dev), - "lpfc%d: %s\n", - phba->brd_no, adaptermsg); - } else { - /* Unknown IOCB command */ - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0335 Unknown IOCB " - "command Data: x%x " - "x%x x%x x%x\n", - irsp->ulpCommand, - irsp->ulpStatus, - irsp->ulpIoTag, - irsp->ulpContext); - } - break; - } - - if (free_saveq) { - list_for_each_entry_safe(rspiocbp, next_iocb, - &saveq->list, list) { - list_del(&rspiocbp->list); - __lpfc_sli_release_iocbq(phba, rspiocbp); - } - __lpfc_sli_release_iocbq(phba, saveq); - } - rspiocbp = NULL; - } - spin_unlock_irqrestore(&phba->hbalock, iflag); - return rspiocbp; -} - -/** - * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs + * lpfc_sli_handle_slow_ring_event - Handle ring events for non-FCP rings * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @mask: Host attention register mask for this ring. * - * This routine wraps the actual slow_ring event process routine from the - * API jump table function pointer from the lpfc_hba struct. + * This function is called from the worker thread when there is a ring + * event for non-fcp rings. The caller does not hold any lock. + * The function processes each response iocb in the response ring until it + * finds an iocb with LE bit set and chains all the iocbs up to the iocb with + * LE bit set. The function will call lpfc_sli_process_sol_iocb function if the + * response iocb indicates a completion of a command iocb. The function + * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited + * iocb. The function frees the resources or calls the completion handler if + * this iocb is an abort completion. The function returns 0 when the allocated + * iocbs are not freed, otherwise returns 1. **/ -void +int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint32_t mask) -{ - phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask); -} - -/** - * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings - * @phba: Pointer to HBA context object. - * @pring: Pointer to driver SLI ring object. - * @mask: Host attention register mask for this ring.
- * - * This function is called from the worker thread when there is a ring event - * for non-fcp rings. The caller does not hold any lock. The function will - * remove each response iocb in the response ring and calls the handle - * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. - **/ -static void -lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, - struct lpfc_sli_ring *pring, uint32_t mask) { struct lpfc_pgp *pgp; IOCB_t *entry; IOCB_t *irsp = NULL; struct lpfc_iocbq *rspiocbp = NULL; + struct lpfc_iocbq *next_iocb; + struct lpfc_iocbq *cmdiocbp; + struct lpfc_iocbq *saveq; + uint8_t iocb_cmd_type; + lpfc_iocb_type type; + uint32_t status, free_saveq; uint32_t portRspPut, portRspMax; + int rc = 1; unsigned long iflag; - uint32_t status; pgp = &phba->port_gp[pring->ringno]; spin_lock_irqsave(&phba->hbalock, iflag); @@ -2916,7 +2128,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, phba->work_hs = HS_FFER3; lpfc_handle_eratt(phba); - return; + return 1; } rmb(); @@ -2961,10 +2173,138 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx); - spin_unlock_irqrestore(&phba->hbalock, iflag); - /* Handle the response IOCB */ - rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp); - spin_lock_irqsave(&phba->hbalock, iflag); + list_add_tail(&rspiocbp->list, &(pring->iocb_continueq)); + + pring->iocb_continueq_cnt++; + if (irsp->ulpLe) { + /* + * By default, the driver expects to free all resources + * associated with this iocb completion. + */ + free_saveq = 1; + saveq = list_get_first(&pring->iocb_continueq, + struct lpfc_iocbq, list); + irsp = &(saveq->iocb); + list_del_init(&pring->iocb_continueq); + pring->iocb_continueq_cnt = 0; + + pring->stats.iocb_rsp++; + + /* + * If resource errors reported from HBA, reduce + * queuedepths of the SCSI device. + */ + if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) && + (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + lpfc_rampdown_queue_depth(phba); + spin_lock_irqsave(&phba->hbalock, iflag); + } + + if (irsp->ulpStatus) { + /* Rsp ring error: IOCB */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "0328 Rsp Ring %d error: " + "IOCB Data: " + "x%x x%x x%x x%x " + "x%x x%x x%x x%x " + "x%x x%x x%x x%x " + "x%x x%x x%x x%x\n", + pring->ringno, + irsp->un.ulpWord[0], + irsp->un.ulpWord[1], + irsp->un.ulpWord[2], + irsp->un.ulpWord[3], + irsp->un.ulpWord[4], + irsp->un.ulpWord[5], + *(((uint32_t *) irsp) + 6), + *(((uint32_t *) irsp) + 7), + *(((uint32_t *) irsp) + 8), + *(((uint32_t *) irsp) + 9), + *(((uint32_t *) irsp) + 10), + *(((uint32_t *) irsp) + 11), + *(((uint32_t *) irsp) + 12), + *(((uint32_t *) irsp) + 13), + *(((uint32_t *) irsp) + 14), + *(((uint32_t *) irsp) + 15)); + } + + /* + * Fetch the IOCB command type and call the correct + * completion routine. Solicited and Unsolicited + * IOCBs on the ELS ring get freed back to the + * lpfc_iocb_list by the discovery kernel thread. 
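Note the locking discipline the dispatch that follows repeats for every completion: hbalock is released before any handler runs and retaken immediately after, because handlers can issue new iocbs or otherwise re-enter the SLI layer. The skeleton, as a fragment (declarations elided; these are the standard kernel spinlock primitives):

        spin_lock_irqsave(&phba->hbalock, iflag);
        /* ... pull the next response off the ring under the lock ... */
        spin_unlock_irqrestore(&phba->hbalock, iflag);
        (cmdiocbp->iocb_cmpl)(phba, cmdiocbp, saveq);   /* may re-enter SLI */
        spin_lock_irqsave(&phba->hbalock, iflag);
        /* ... resume walking the ring, still protected ... */
        spin_unlock_irqrestore(&phba->hbalock, iflag);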
+ */ + iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK; + type = lpfc_sli_iocb_cmd_type(iocb_cmd_type); + if (type == LPFC_SOL_IOCB) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + rc = lpfc_sli_process_sol_iocb(phba, pring, + saveq); + spin_lock_irqsave(&phba->hbalock, iflag); + } else if (type == LPFC_UNSOL_IOCB) { + spin_unlock_irqrestore(&phba->hbalock, iflag); + rc = lpfc_sli_process_unsol_iocb(phba, pring, + saveq); + spin_lock_irqsave(&phba->hbalock, iflag); + if (!rc) + free_saveq = 0; + } else if (type == LPFC_ABORT_IOCB) { + if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) && + ((cmdiocbp = + lpfc_sli_iocbq_lookup(phba, pring, + saveq)))) { + /* Call the specified completion + routine */ + if (cmdiocbp->iocb_cmpl) { + spin_unlock_irqrestore( + &phba->hbalock, + iflag); + (cmdiocbp->iocb_cmpl) (phba, + cmdiocbp, saveq); + spin_lock_irqsave( + &phba->hbalock, + iflag); + } else + __lpfc_sli_release_iocbq(phba, + cmdiocbp); + } + } else if (type == LPFC_UNKNOWN_IOCB) { + if (irsp->ulpCommand == CMD_ADAPTER_MSG) { + + char adaptermsg[LPFC_MAX_ADPTMSG]; + + memset(adaptermsg, 0, + LPFC_MAX_ADPTMSG); + memcpy(&adaptermsg[0], (uint8_t *) irsp, + MAX_MSG_DATA); + dev_warn(&((phba->pcidev)->dev), + "lpfc%d: %s\n", + phba->brd_no, adaptermsg); + } else { + /* Unknown IOCB command */ + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "0335 Unknown IOCB " + "command Data: x%x " + "x%x x%x x%x\n", + irsp->ulpCommand, + irsp->ulpStatus, + irsp->ulpIoTag, + irsp->ulpContext); + } + } + + if (free_saveq) { + list_for_each_entry_safe(rspiocbp, next_iocb, + &saveq->list, list) { + list_del(&rspiocbp->list); + __lpfc_sli_release_iocbq(phba, + rspiocbp); + } + __lpfc_sli_release_iocbq(phba, saveq); + } + rspiocbp = NULL; + } /* * If the port response put pointer has not been updated, sync @@ -2998,37 +2338,7 @@ lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba, } spin_unlock_irqrestore(&phba->hbalock, iflag); - return; -} - -/** - * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events - * @phba: Pointer to HBA context object. - * @pring: Pointer to driver SLI ring object. - * @mask: Host attention register mask for this ring. - * - * This function is called from the worker thread when there is a pending - * ELS response iocb on the driver internal slow-path response iocb worker - * queue. The caller does not hold any lock. The function will remove each - * response iocb from the response worker queue and calls the handle - * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it. - **/ -static void -lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba, - struct lpfc_sli_ring *pring, uint32_t mask) -{ - struct lpfc_iocbq *irspiocbq; - unsigned long iflag; - - while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) { - /* Get the response iocb from the head of work queue */ - spin_lock_irqsave(&phba->hbalock, iflag); - list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue, - irspiocbq, struct lpfc_iocbq, list); - spin_unlock_irqrestore(&phba->hbalock, iflag); - /* Process the response iocb */ - lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq); - } + return rc; } /** @@ -3110,7 +2420,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) } /** - * lpfc_sli_brdready_s3 - Check for sli3 host ready status + * lpfc_sli_brdready - Check for host status bits * @phba: Pointer to HBA context object. * @mask: Bit mask to be checked. * @@ -3122,8 +2432,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) * function returns 1 when HBA fail to restart otherwise returns * zero. 
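lpfc_sli_brdready(), whose kernel-doc this hunk retitles, is essentially a bounded poll of the host status register with a mid-way retry via board restart. A simplified sketch of that shape (the retry counts and delays here are illustrative, not the driver's exact schedule):

    /* Poll for 'mask' bits in the host status register; restart the
     * board once part-way through, give up after ~20 attempts. */
    static int wait_board_ready(struct lpfc_hba *phba, uint32_t mask)
    {
            uint32_t status = readl(phba->HSregaddr);
            int i = 0;

            while ((status & mask) != mask) {
                    if (++i > 20)
                            return 1;           /* board never came up */
                    msleep(i <= 5 ? 10 : 500);  /* progressive back-off */
                    if (i == 15)
                            lpfc_sli_brdrestart(phba);
                    status = readl(phba->HSregaddr);
            }
            return 0;
    }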
**/ -static int -lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) +int +lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) { uint32_t status; int i = 0; @@ -3167,56 +2477,6 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask) return retval; } -/** - * lpfc_sli_brdready_s4 - Check for sli4 host ready status - * @phba: Pointer to HBA context object. - * @mask: Bit mask to be checked. - * - * This function checks the host status register to check if HBA is - * ready. This function will wait in a loop for the HBA to be ready - * If the HBA is not ready , the function will will reset the HBA PCI - * function again. The function returns 1 when HBA fail to be ready - * otherwise returns zero. - **/ -static int -lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask) -{ - uint32_t status; - int retval = 0; - - /* Read the HBA Host Status Register */ - status = lpfc_sli4_post_status_check(phba); - - if (status) { - phba->pport->port_state = LPFC_VPORT_UNKNOWN; - lpfc_sli_brdrestart(phba); - status = lpfc_sli4_post_status_check(phba); - } - - /* Check to see if any errors occurred during init */ - if (status) { - phba->link_state = LPFC_HBA_ERROR; - retval = 1; - } else - phba->sli4_hba.intr_enable = 0; - - return retval; -} - -/** - * lpfc_sli_brdready - Wrapper func for checking the hba readyness - * @phba: Pointer to HBA context object. - * @mask: Bit mask to be checked. - * - * This routine wraps the actual SLI3 or SLI4 hba readyness check routine - * from the API jump table function pointer from the lpfc_hba struct. - **/ -int -lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask) -{ - return phba->lpfc_sli_brdready(phba, mask); -} - #define BARRIER_TEST_PATTERN (0xdeadbeef) /** @@ -3272,7 +2532,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba) mdelay(1); if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) { - if (phba->sli.sli_flag & LPFC_SLI_ACTIVE || + if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE || phba->pport->stopped) goto restore_hc; else @@ -3353,9 +2613,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) return 1; } - spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~LPFC_SLI_ACTIVE; - spin_unlock_irq(&phba->hbalock); + psli->sli_flag &= ~LPFC_SLI2_ACTIVE; mempool_free(pmb, phba->mbox_mem_pool); @@ -3378,10 +2636,10 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) } spin_lock_irq(&phba->hbalock); psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - psli->mbox_active = NULL; phba->link_flag &= ~LS_IGNORE_ERATT; spin_unlock_irq(&phba->hbalock); + psli->mbox_active = NULL; lpfc_hba_down_post(phba); phba->link_state = LPFC_HBA_ERROR; @@ -3389,7 +2647,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba) } /** - * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA + * lpfc_sli_brdreset - Reset the HBA * @phba: Pointer to HBA context object. * * This function resets the HBA by writing HC_INITFF to the control @@ -3425,8 +2683,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) (cfg_value & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); - psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA); - + psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA); /* Now toggle INITFF bit in the Host Control Register */ writel(HC_INITFF, phba->HCregaddr); mdelay(1); @@ -3453,79 +2710,20 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) } /** - * lpfc_sli4_brdreset - Reset a sli-4 HBA + * lpfc_sli_brdrestart - Restart the HBA * @phba: Pointer to HBA context object. * - * This function resets a SLI4 HBA. This function disables PCI layer parity - * checking during resets the device. 
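lpfc_sli_brdreset(), shown just above, has to mask PCI error reporting while it yanks the board: toggling INITFF glitches the bus, and a parity/SERR trap in the middle of a deliberate reset would be spurious. The save/mask/restore pattern, as a fragment built on the standard PCI config accessors:

        uint16_t cfg_value;

        /* save the command register, then mask parity and SERR */
        pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
        pci_write_config_word(phba->pcidev, PCI_COMMAND,
                              cfg_value & ~(PCI_COMMAND_PARITY |
                                            PCI_COMMAND_SERR));

        writel(HC_INITFF, phba->HCregaddr);     /* toggle INITFF */
        mdelay(1);
        readl(phba->HCregaddr);                 /* flush posted write */
        writel(0, phba->HCregaddr);
        readl(phba->HCregaddr);

        /* restore the saved command register */
        pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);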
The caller is not required to hold - * any locks. - * - * This function returns 0 always. + * This function is called in the SLI initialization code path to + * restart the HBA. The caller is not required to hold any lock. + * This function writes MBX_RESTART mailbox command to the SLIM and + * resets the HBA. At the end of the function, it calls lpfc_hba_down_post + * function to free any pending commands. The function enables + * POST only during the first initialization. The function returns zero. + * The function does not guarantee completion of MBX_RESTART mailbox + * command before the return of this function. **/ int -lpfc_sli4_brdreset(struct lpfc_hba *phba) -{ - struct lpfc_sli *psli = &phba->sli; - uint16_t cfg_value; - uint8_t qindx; - - /* Reset HBA */ - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "0295 Reset HBA Data: x%x x%x\n", - phba->pport->port_state, psli->sli_flag); - - /* perform board reset */ - phba->fc_eventTag = 0; - phba->pport->fc_myDID = 0; - phba->pport->fc_prevDID = 0; - - /* Turn off parity checking and serr during the physical reset */ - pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); - pci_write_config_word(phba->pcidev, PCI_COMMAND, - (cfg_value & - ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR))); - - spin_lock_irq(&phba->hbalock); - psli->sli_flag &= ~(LPFC_PROCESS_LA); - phba->fcf.fcf_flag = 0; - /* Clean up the child queue list for the CQs */ - list_del_init(&phba->sli4_hba.mbx_wq->list); - list_del_init(&phba->sli4_hba.els_wq->list); - list_del_init(&phba->sli4_hba.hdr_rq->list); - list_del_init(&phba->sli4_hba.dat_rq->list); - list_del_init(&phba->sli4_hba.mbx_cq->list); - list_del_init(&phba->sli4_hba.els_cq->list); - list_del_init(&phba->sli4_hba.rxq_cq->list); - for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++) - list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list); - for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++) - list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list); - spin_unlock_irq(&phba->hbalock); - - /* Now physically reset the device */ - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0389 Performing PCI function reset!\n"); - /* Perform FCoE PCI function reset */ - lpfc_pci_function_reset(phba); - - return 0; -} - -/** - * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba - * @phba: Pointer to HBA context object. - * - * This function is called in the SLI initialization code path to - * restart the HBA. The caller is not required to hold any lock. - * This function writes MBX_RESTART mailbox command to the SLIM and - * resets the HBA. At the end of the function, it calls lpfc_hba_down_post - * function to free any pending commands. The function enables - * POST only during the first initialization. The function returns zero. - * The function does not guarantee completion of MBX_RESTART mailbox - * command before the return of this function. - **/ -static int -lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) +lpfc_sli_brdrestart(struct lpfc_hba *phba) { MAILBOX_t *mb; struct lpfc_sli *psli; @@ -3564,7 +2762,7 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) lpfc_sli_brdreset(phba); phba->pport->stopped = 0; phba->link_state = LPFC_INIT_START; - phba->hba_flag = 0; + spin_unlock_irq(&phba->hbalock); memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); @@ -3578,55 +2776,6 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) return 0; } -/** - * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba - * @phba: Pointer to HBA context object. 
- * - * This function is called in the SLI initialization code path to restart - * a SLI4 HBA. The caller is not required to hold any lock. - * At the end of the function, it calls lpfc_hba_down_post function to - * free any pending commands. - **/ -static int -lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) -{ - struct lpfc_sli *psli = &phba->sli; - - - /* Restart HBA */ - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "0296 Restart HBA Data: x%x x%x\n", - phba->pport->port_state, psli->sli_flag); - - lpfc_sli4_brdreset(phba); - - spin_lock_irq(&phba->hbalock); - phba->pport->stopped = 0; - phba->link_state = LPFC_INIT_START; - phba->hba_flag = 0; - spin_unlock_irq(&phba->hbalock); - - memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); - psli->stats_start = get_seconds(); - - lpfc_hba_down_post(phba); - - return 0; -} - -/** - * lpfc_sli_brdrestart - Wrapper func for restarting hba - * @phba: Pointer to HBA context object. - * - * This routine wraps the actual SLI3 or SLI4 hba restart routine from the - * API jump table function pointer from the lpfc_hba struct. -**/ -int -lpfc_sli_brdrestart(struct lpfc_hba *phba) -{ - return phba->lpfc_sli_brdrestart(phba); -} - /** * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart * @phba: Pointer to HBA context object. @@ -3791,7 +2940,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba) if (!pmb) return -ENOMEM; - pmbox = &pmb->u.mb; + pmbox = &pmb->mb; /* Initialize the struct lpfc_sli_hbq structure for each hbq */ phba->link_state = LPFC_INIT_MBX_CMDS; @@ -3834,26 +2983,6 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba) return 0; } -/** - * lpfc_sli4_rb_setup - Initialize and post RBs to HBA - * @phba: Pointer to HBA context object. - * - * This function is called during the SLI initialization to configure - * all the HBQs and post buffers to the HBQ. The caller is not - * required to hold any locks. This function will return zero if successful - * else it will return negative error code. - **/ -static int -lpfc_sli4_rb_setup(struct lpfc_hba *phba) -{ - phba->hbq_in_use = 1; - phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count; - phba->hbq_count = 1; - /* Initially populate or replenish the HBQs */ - lpfc_sli_hbqbuf_init_hbqs(phba, 0); - return 0; -} - /** * lpfc_sli_config_port - Issue config port mailbox command * @phba: Pointer to HBA context object. 
@@ -3918,43 +3047,33 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0442 Adapter failed to init, mbxCmd x%x " "CONFIG_PORT, mbxStatus x%x Data: x%x\n", - pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0); + pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0); spin_lock_irq(&phba->hbalock); - phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE; + phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE; spin_unlock_irq(&phba->hbalock); rc = -ENXIO; - } else { - /* Allow asynchronous mailbox command to go through */ - spin_lock_irq(&phba->hbalock); - phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; - spin_unlock_irq(&phba->hbalock); + } else done = 1; - } } if (!done) { rc = -EINVAL; goto do_prep_failed; } - if (pmb->u.mb.un.varCfgPort.sli_mode == 3) { - if (!pmb->u.mb.un.varCfgPort.cMA) { + if (pmb->mb.un.varCfgPort.sli_mode == 3) { + if (!pmb->mb.un.varCfgPort.cMA) { rc = -ENXIO; goto do_prep_failed; } - if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) { + if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) { phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; - phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi; - phba->max_vports = (phba->max_vpi > phba->max_vports) ? - phba->max_vpi : phba->max_vports; - + phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi; } else phba->max_vpi = 0; - if (pmb->u.mb.un.varCfgPort.gdss) - phba->sli3_options |= LPFC_SLI3_DSS_ENABLED; - if (pmb->u.mb.un.varCfgPort.gerbm) + if (pmb->mb.un.varCfgPort.gerbm) phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED; - if (pmb->u.mb.un.varCfgPort.gcrp) + if (pmb->mb.un.varCfgPort.gcrp) phba->sli3_options |= LPFC_SLI3_CRP_ENABLED; - if (pmb->u.mb.un.varCfgPort.ginb) { + if (pmb->mb.un.varCfgPort.ginb) { phba->sli3_options |= LPFC_SLI3_INB_ENABLED; phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get; phba->port_gp = phba->mbox->us.s3_inb_pgp.port; @@ -3970,7 +3089,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode) } if (phba->cfg_enable_bg) { - if (pmb->u.mb.un.varCfgPort.gbg) + if (pmb->mb.un.varCfgPort.gbg) phba->sli3_options |= LPFC_SLI3_BG_ENABLED; else lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -4065,9 +3184,8 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) if (rc) goto lpfc_sli_hba_setup_error; } - spin_lock_irq(&phba->hbalock); + phba->sli.sli_flag |= LPFC_PROCESS_LA; - spin_unlock_irq(&phba->hbalock); rc = lpfc_config_port_post(phba); if (rc) @@ -4082,488 +3200,6 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba) return rc; } -/** - * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region - * @phba: Pointer to HBA context object. - * @mboxq: mailbox pointer. - * This function issue a dump mailbox command to read config region - * 23 and parse the records in the region and populate driver - * data structure. - **/ -static int -lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba, - LPFC_MBOXQ_t *mboxq) -{ - struct lpfc_dmabuf *mp; - struct lpfc_mqe *mqe; - uint32_t data_length; - int rc; - - /* Program the default value of vlan_id and fc_map */ - phba->valid_vlan = 0; - phba->fc_map[0] = LPFC_FCOE_FCF_MAP0; - phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; - phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; - - mqe = &mboxq->u.mqe; - if (lpfc_dump_fcoe_param(phba, mboxq)) - return -ENOMEM; - - mp = (struct lpfc_dmabuf *) mboxq->context1; - rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - - lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "(%d):2571 Mailbox cmd x%x Status x%x " - "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " - "x%x x%x x%x x%x x%x x%x x%x x%x x%x " - "CQ: x%x x%x x%x x%x\n", - mboxq->vport ? 
mboxq->vport->vpi : 0, - bf_get(lpfc_mqe_command, mqe), - bf_get(lpfc_mqe_status, mqe), - mqe->un.mb_words[0], mqe->un.mb_words[1], - mqe->un.mb_words[2], mqe->un.mb_words[3], - mqe->un.mb_words[4], mqe->un.mb_words[5], - mqe->un.mb_words[6], mqe->un.mb_words[7], - mqe->un.mb_words[8], mqe->un.mb_words[9], - mqe->un.mb_words[10], mqe->un.mb_words[11], - mqe->un.mb_words[12], mqe->un.mb_words[13], - mqe->un.mb_words[14], mqe->un.mb_words[15], - mqe->un.mb_words[16], mqe->un.mb_words[50], - mboxq->mcqe.word0, - mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, - mboxq->mcqe.trailer); - - if (rc) { - lpfc_mbuf_free(phba, mp->virt, mp->phys); - kfree(mp); - return -EIO; - } - data_length = mqe->un.mb_words[5]; - if (data_length > DMP_FCOEPARAM_RGN_SIZE) - return -EIO; - - lpfc_parse_fcoe_conf(phba, mp->virt, data_length); - lpfc_mbuf_free(phba, mp->virt, mp->phys); - kfree(mp); - return 0; -} - -/** - * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data - * @phba: pointer to lpfc hba data structure. - * @mboxq: pointer to the LPFC_MBOXQ_t structure. - * @vpd: pointer to the memory to hold resulting port vpd data. - * @vpd_size: On input, the number of bytes allocated to @vpd. - * On output, the number of data bytes in @vpd. - * - * This routine executes a READ_REV SLI4 mailbox command. In - * addition, this routine gets the port vpd data. - * - * Return codes - * 0 - sucessful - * ENOMEM - could not allocated memory. - **/ -static int -lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, - uint8_t *vpd, uint32_t *vpd_size) -{ - int rc = 0; - uint32_t dma_size; - struct lpfc_dmabuf *dmabuf; - struct lpfc_mqe *mqe; - - dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); - if (!dmabuf) - return -ENOMEM; - - /* - * Get a DMA buffer for the vpd data resulting from the READ_REV - * mailbox command. - */ - dma_size = *vpd_size; - dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, - dma_size, - &dmabuf->phys, - GFP_KERNEL); - if (!dmabuf->virt) { - kfree(dmabuf); - return -ENOMEM; - } - memset(dmabuf->virt, 0, dma_size); - - /* - * The SLI4 implementation of READ_REV conflicts at word1, - * bits 31:16 and SLI4 adds vpd functionality not present - * in SLI3. This code corrects the conflicts. - */ - lpfc_read_rev(phba, mboxq); - mqe = &mboxq->u.mqe; - mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys); - mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys); - mqe->un.read_rev.word1 &= 0x0000FFFF; - bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1); - bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size); - - rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - if (rc) { - dma_free_coherent(&phba->pcidev->dev, dma_size, - dmabuf->virt, dmabuf->phys); - return -EIO; - } - - lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "(%d):0380 Mailbox cmd x%x Status x%x " - "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " - "x%x x%x x%x x%x x%x x%x x%x x%x x%x " - "CQ: x%x x%x x%x x%x\n", - mboxq->vport ? 
mboxq->vport->vpi : 0, - bf_get(lpfc_mqe_command, mqe), - bf_get(lpfc_mqe_status, mqe), - mqe->un.mb_words[0], mqe->un.mb_words[1], - mqe->un.mb_words[2], mqe->un.mb_words[3], - mqe->un.mb_words[4], mqe->un.mb_words[5], - mqe->un.mb_words[6], mqe->un.mb_words[7], - mqe->un.mb_words[8], mqe->un.mb_words[9], - mqe->un.mb_words[10], mqe->un.mb_words[11], - mqe->un.mb_words[12], mqe->un.mb_words[13], - mqe->un.mb_words[14], mqe->un.mb_words[15], - mqe->un.mb_words[16], mqe->un.mb_words[50], - mboxq->mcqe.word0, - mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, - mboxq->mcqe.trailer); - - /* - * The available vpd length cannot be bigger than the - * DMA buffer passed to the port. Catch the less than - * case and update the caller's size. - */ - if (mqe->un.read_rev.avail_vpd_len < *vpd_size) - *vpd_size = mqe->un.read_rev.avail_vpd_len; - - lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size); - dma_free_coherent(&phba->pcidev->dev, dma_size, - dmabuf->virt, dmabuf->phys); - kfree(dmabuf); - return 0; -} - -/** - * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues - * @phba: pointer to lpfc hba data structure. - * - * This routine is called to explicitly arm the SLI4 device's completion and - * event queues - **/ -static void -lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) -{ - uint8_t fcp_eqidx; - - lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM); - lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM); - lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM); - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) - lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx], - LPFC_QUEUE_REARM); - lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM); - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) - lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx], - LPFC_QUEUE_REARM); -} - -/** - * lpfc_sli4_hba_setup - SLI4 device intialization PCI function - * @phba: Pointer to HBA context object. - * - * This function is the main SLI4 device intialization PCI function. This - * function is called by the HBA intialization code, HBA reset code and - * HBA error attention handler code. Caller is not required to hold any - * locks. - **/ -int -lpfc_sli4_hba_setup(struct lpfc_hba *phba) -{ - int rc; - LPFC_MBOXQ_t *mboxq; - struct lpfc_mqe *mqe; - uint8_t *vpd; - uint32_t vpd_size; - uint32_t ftr_rsp = 0; - struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport); - struct lpfc_vport *vport = phba->pport; - struct lpfc_dmabuf *mp; - - /* Perform a PCI function reset to start from clean */ - rc = lpfc_pci_function_reset(phba); - if (unlikely(rc)) - return -ENODEV; - - /* Check the HBA Host Status Register for readyness */ - rc = lpfc_sli4_post_status_check(phba); - if (unlikely(rc)) - return -ENODEV; - else { - spin_lock_irq(&phba->hbalock); - phba->sli.sli_flag |= LPFC_SLI_ACTIVE; - spin_unlock_irq(&phba->hbalock); - } - - /* - * Allocate a single mailbox container for initializing the - * port. - */ - mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mboxq) - return -ENOMEM; - - /* - * Continue initialization with default values even if driver failed - * to read FCoE param config regions - */ - if (lpfc_sli4_read_fcoe_params(phba, mboxq)) - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT, - "2570 Failed to read FCoE parameters \n"); - - /* Issue READ_REV to collect vpd and FW information. 
*/ - vpd_size = PAGE_SIZE; - vpd = kzalloc(vpd_size, GFP_KERNEL); - if (!vpd) { - rc = -ENOMEM; - goto out_free_mbox; - } - - rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size); - if (unlikely(rc)) - goto out_free_vpd; - - mqe = &mboxq->u.mqe; - if ((bf_get(lpfc_mbx_rd_rev_sli_lvl, - &mqe->un.read_rev) != LPFC_SLI_REV4) || - (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0376 READ_REV Error. SLI Level %d " - "FCoE enabled %d\n", - bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev), - bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)); - rc = -EIO; - goto out_free_vpd; - } - /* Single threaded at this point, no need for lock */ - spin_lock_irq(&phba->hbalock); - phba->hba_flag |= HBA_FCOE_SUPPORT; - spin_unlock_irq(&phba->hbalock); - /* - * Evaluate the read rev and vpd data. Populate the driver - * state with the results. If this routine fails, the failure - * is not fatal as the driver will use generic values. - */ - rc = lpfc_parse_vpd(phba, vpd, vpd_size); - if (unlikely(!rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0377 Error %d parsing vpd. " - "Using defaults.\n", rc); - rc = 0; - } - - /* By now, we should determine the SLI revision, hard code for now */ - phba->sli_rev = LPFC_SLI_REV4; - - /* - * Discover the port's supported feature set and match it against the - * hosts requests. - */ - lpfc_request_features(phba, mboxq); - rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - if (unlikely(rc)) { - rc = -EIO; - goto out_free_vpd; - } - - /* - * The port must support FCP initiator mode as this is the - * only mode running in the host. - */ - if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) { - lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, - "0378 No support for fcpi mode.\n"); - ftr_rsp++; - } - - /* - * If the port cannot support the host's requested features - * then turn off the global config parameters to disable the - * feature in the driver. This is not a fatal error. - */ - if ((phba->cfg_enable_bg) && - !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) - ftr_rsp++; - - if (phba->max_vpi && phba->cfg_enable_npiv && - !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) - ftr_rsp++; - - if (ftr_rsp) { - lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, - "0379 Feature Mismatch Data: x%08x %08x " - "x%x x%x x%x\n", mqe->un.req_ftrs.word2, - mqe->un.req_ftrs.word3, phba->cfg_enable_bg, - phba->cfg_enable_npiv, phba->max_vpi); - if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) - phba->cfg_enable_bg = 0; - if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) - phba->cfg_enable_npiv = 0; - } - - /* These SLI3 features are assumed in SLI4 */ - spin_lock_irq(&phba->hbalock); - phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); - spin_unlock_irq(&phba->hbalock); - - /* Read the port's service parameters. */ - lpfc_read_sparam(phba, mboxq, vport->vpi); - mboxq->vport = vport; - rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - mp = (struct lpfc_dmabuf *) mboxq->context1; - if (rc == MBX_SUCCESS) { - memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm)); - rc = 0; - } - - /* - * This memory was allocated by the lpfc_read_sparam routine. Release - * it to the mbuf pool. 
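The comment above points at a recurring ownership rule in this file: whoever takes the lpfc_dmabuf out of a mailbox's context1 is responsible for returning the coherent memory to the mbuf pool and freeing the wrapper. The pattern, as a fragment:

        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) mboxq->context1;

        if (mp) {
                lpfc_mbuf_free(phba, mp->virt, mp->phys);   /* DMA memory */
                kfree(mp);                                  /* wrapper */
                mboxq->context1 = NULL;
        }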
- */ - lpfc_mbuf_free(phba, mp->virt, mp->phys); - kfree(mp); - mboxq->context1 = NULL; - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0382 READ_SPARAM command failed " - "status %d, mbxStatus x%x\n", - rc, bf_get(lpfc_mqe_status, mqe)); - phba->link_state = LPFC_HBA_ERROR; - rc = -EIO; - goto out_free_vpd; - } - - if (phba->cfg_soft_wwnn) - u64_to_wwn(phba->cfg_soft_wwnn, - vport->fc_sparam.nodeName.u.wwn); - if (phba->cfg_soft_wwpn) - u64_to_wwn(phba->cfg_soft_wwpn, - vport->fc_sparam.portName.u.wwn); - memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName, - sizeof(struct lpfc_name)); - memcpy(&vport->fc_portname, &vport->fc_sparam.portName, - sizeof(struct lpfc_name)); - - /* Update the fc_host data structures with new wwn. */ - fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn); - fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn); - - /* Register SGL pool to the device using non-embedded mailbox command */ - rc = lpfc_sli4_post_sgl_list(phba); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0582 Error %d during sgl post operation", rc); - rc = -ENODEV; - goto out_free_vpd; - } - - /* Register SCSI SGL pool to the device */ - rc = lpfc_sli4_repost_scsi_sgl_list(phba); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, - "0383 Error %d during scsi sgl post opeation", - rc); - /* Some Scsi buffers were moved to the abort scsi list */ - /* A pci function reset will repost them */ - rc = -ENODEV; - goto out_free_vpd; - } - - /* Post the rpi header region to the device. */ - rc = lpfc_sli4_post_all_rpi_hdrs(phba); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0393 Error %d during rpi post operation\n", - rc); - rc = -ENODEV; - goto out_free_vpd; - } - /* Temporary initialization of lpfc_fip_flag to non-fip */ - bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0); - - /* Set up all the queues to the device */ - rc = lpfc_sli4_queue_setup(phba); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0381 Error %d during queue setup.\n ", rc); - goto out_stop_timers; - } - - /* Arm the CQs and then EQs on device */ - lpfc_sli4_arm_cqeq_intr(phba); - - /* Indicate device interrupt mode */ - phba->sli4_hba.intr_enable = 1; - - /* Allow asynchronous mailbox command to go through */ - spin_lock_irq(&phba->hbalock); - phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; - spin_unlock_irq(&phba->hbalock); - - /* Post receive buffers to the device */ - lpfc_sli4_rb_setup(phba); - - /* Start the ELS watchdog timer */ - /* - * The driver for SLI4 is not yet ready to process timeouts - * or interrupts. Once it is, the comment bars can be removed. - */ - /* mod_timer(&vport->els_tmofunc, - * jiffies + HZ * (phba->fc_ratov*2)); */ - - /* Start heart beat timer */ - mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); - phba->hb_outstanding = 0; - phba->last_completion_time = jiffies; - - /* Start error attention (ERATT) polling timer */ - mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); - - /* - * The port is ready, set the host's link state to LINK_DOWN - * in preparation for link interrupts. 
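The timer arming in the removed setup code above is the standard jiffies idiom: deadlines are absolute, expressed as "now plus HZ-scaled seconds". The same two timers, as a fragment (the interval constants are the driver's own):

        /* heartbeat mailbox every LPFC_HB_MBOX_INTERVAL seconds */
        mod_timer(&phba->hb_tmofunc,
                  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);

        /* poll the error-attention registers periodically */
        mod_timer(&phba->eratt_poll,
                  jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);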
- */ - lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed); - mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - lpfc_set_loopback_flag(phba); - /* Change driver state to LPFC_LINK_DOWN right before init link */ - spin_lock_irq(&phba->hbalock); - phba->link_state = LPFC_LINK_DOWN; - spin_unlock_irq(&phba->hbalock); - rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); - if (unlikely(rc != MBX_NOT_FINISHED)) { - kfree(vpd); - return 0; - } else - rc = -EIO; - - /* Unset all the queues set up in this routine when error out */ - if (rc) - lpfc_sli4_queue_unset(phba); - -out_stop_timers: - if (rc) - lpfc_stop_hba_timers(phba); -out_free_vpd: - kfree(vpd); -out_free_mbox: - mempool_free(mboxq, phba->mbox_mem_pool); - return rc; -} /** * lpfc_mbox_timeout - Timeout call back function for mbox timer @@ -4608,7 +3244,7 @@ void lpfc_mbox_timeout_handler(struct lpfc_hba *phba) { LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active; - MAILBOX_t *mb = &pmbox->u.mb; + MAILBOX_t *mb = &pmbox->mb; struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; @@ -4645,7 +3281,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) spin_unlock_irq(&phba->pport->work_port_lock); spin_lock_irq(&phba->hbalock); phba->link_state = LPFC_LINK_UNKNOWN; - psli->sli_flag &= ~LPFC_SLI_ACTIVE; + psli->sli_flag &= ~LPFC_SLI2_ACTIVE; spin_unlock_irq(&phba->hbalock); pring = &psli->ring[psli->fcp_ring]; @@ -4653,20 +3289,32 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "0345 Resetting board due to mailbox timeout\n"); - - /* Reset the HBA device */ - lpfc_reset_hba(phba); + /* + * lpfc_offline calls lpfc_sli_hba_down which will clean up + * outstanding mailbox commands. + */ + /* If resets are disabled then set error state and return. */ + if (!phba->cfg_enable_hba_reset) { + phba->link_state = LPFC_HBA_ERROR; + return; + } + lpfc_offline_prep(phba); + lpfc_offline(phba); + lpfc_sli_brdrestart(phba); + lpfc_online(phba); + lpfc_unblock_mgmt_io(phba); + return; } /** - * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware + * lpfc_sli_issue_mbox - Issue a mailbox command to firmware * @phba: Pointer to HBA context object. * @pmbox: Pointer to mailbox object. * @flag: Flag indicating how the mailbox need to be processed. * * This function is called by discovery code and HBA management code - * to submit a mailbox command to firmware with SLI-3 interface spec. This - * function gets the hbalock to protect the data structures. + * to submit a mailbox command to firmware. This function gets the + * hbalock to protect the data structures. * The mailbox command can be submitted in polling mode, in which case * this function will wait in a polling loop for the completion of the * mailbox. @@ -4684,9 +3332,8 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) * return codes the caller owns the mailbox command after the return of * the function.
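The ownership rule the kernel-doc above ends on matters to every caller: when lpfc_sli_issue_mbox() returns MBX_NOT_FINISHED the command was never queued and the caller must free it, while on success in NOWAIT mode the completion handler takes over. A caller-side sketch matching how lpfc_sli_def_mbox_cmpl() re-issues UNREG_LOGIN earlier in this patch (the surrounding function, vpi, and rpi are hypothetical):

        LPFC_MBOXQ_t *pmb;
        int rc;

        pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return;
        lpfc_unreg_login(phba, vpi, rpi, pmb);  /* build the command */
        pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
        if (rc == MBX_NOT_FINISHED)             /* still ours: free it */
                mempool_free(pmb, phba->mbox_mem_pool);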
**/ -static int -lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, - uint32_t flag) +int +lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) { MAILBOX_t *mb; struct lpfc_sli *psli = &phba->sli; @@ -4702,10 +3349,6 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, spin_lock_irqsave(&phba->hbalock, drvr_flag); if (!pmbox) { /* processing mbox queue from intr_handler */ - if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { - spin_unlock_irqrestore(&phba->hbalock, drvr_flag); - return MBX_SUCCESS; - } processing_queue = 1; phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; pmbox = lpfc_mbox_get(phba); @@ -4722,7 +3365,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT, "1806 Mbox x%x failed. No vport\n", - pmbox->u.mb.mbxCommand); + pmbox->mb.mbxCommand); dump_stack(); goto out_not_finished; } @@ -4742,29 +3385,21 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, psli = &phba->sli; - mb = &pmbox->u.mb; + mb = &pmbox->mb; status = MBX_SUCCESS; if (phba->link_state == LPFC_HBA_ERROR) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command cannot issue */ - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "(%d):0311 Mailbox command x%x cannot " - "issue Data: x%x x%x\n", - pmbox->vport ? pmbox->vport->vpi : 0, - pmbox->u.mb.mbxCommand, psli->sli_flag, flag); + LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); goto out_not_finished; } if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT && !(readl(phba->HCregaddr) & HC_MBINT_ENA)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "(%d):2528 Mailbox command x%x cannot " - "issue Data: x%x x%x\n", - pmbox->vport ? pmbox->vport->vpi : 0, - pmbox->u.mb.mbxCommand, psli->sli_flag, flag); + LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); goto out_not_finished; } @@ -4778,24 +3413,14 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command cannot issue */ - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "(%d):2529 Mailbox command x%x " - "cannot issue Data: x%x x%x\n", - pmbox->vport ? pmbox->vport->vpi : 0, - pmbox->u.mb.mbxCommand, - psli->sli_flag, flag); + LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); goto out_not_finished; } - if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) { + if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) { spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command cannot issue */ - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "(%d):2530 Mailbox command x%x " - "cannot issue Data: x%x x%x\n", - pmbox->vport ? pmbox->vport->vpi : 0, - pmbox->u.mb.mbxCommand, - psli->sli_flag, flag); + LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); goto out_not_finished; } @@ -4837,17 +3462,12 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, /* If we are not polling, we MUST be in SLI2 mode */ if (flag != MBX_POLL) { - if (!(psli->sli_flag & LPFC_SLI_ACTIVE) && + if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) && (mb->mbxCommand != MBX_KILL_BOARD)) { psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; spin_unlock_irqrestore(&phba->hbalock, drvr_flag); /* Mbox command cannot issue */ - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "(%d):2531 Mailbox command x%x " - "cannot issue Data: x%x x%x\n", - pmbox->vport ? 
pmbox->vport->vpi : 0, - pmbox->u.mb.mbxCommand, - psli->sli_flag, flag); + LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag); goto out_not_finished; } /* timeout active mbox command */ @@ -4886,7 +3506,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, /* next set own bit for the adapter and copy over command word */ mb->mbxOwner = OWN_CHIP; - if (psli->sli_flag & LPFC_SLI_ACTIVE) { + if (psli->sli_flag & LPFC_SLI2_ACTIVE) { /* First copy command data to host SLIM area */ lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE); } else { @@ -4909,7 +3529,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, if (mb->mbxCommand == MBX_CONFIG_PORT) { /* switch over to host mailbox */ - psli->sli_flag |= LPFC_SLI_ACTIVE; + psli->sli_flag |= LPFC_SLI2_ACTIVE; } } @@ -4932,7 +3552,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, writel(CA_MBATT, phba->CAregaddr); readl(phba->CAregaddr); /* flush */ - if (psli->sli_flag & LPFC_SLI_ACTIVE) { + if (psli->sli_flag & LPFC_SLI2_ACTIVE) { /* First read mbox status word */ word0 = *((uint32_t *)phba->mbox); word0 = le32_to_cpu(word0); @@ -4971,7 +3591,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, spin_lock_irqsave(&phba->hbalock, drvr_flag); } - if (psli->sli_flag & LPFC_SLI_ACTIVE) { + if (psli->sli_flag & LPFC_SLI2_ACTIVE) { /* First copy command data */ word0 = *((uint32_t *)phba->mbox); word0 = le32_to_cpu(word0); @@ -4984,7 +3604,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, if (((slimword0 & OWN_CHIP) != OWN_CHIP) && slimmb->mbxStatus) { psli->sli_flag &= - ~LPFC_SLI_ACTIVE; + ~LPFC_SLI2_ACTIVE; word0 = slimword0; } } @@ -4996,7 +3616,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, ha_copy = readl(phba->HAregaddr); } - if (psli->sli_flag & LPFC_SLI_ACTIVE) { + if (psli->sli_flag & LPFC_SLI2_ACTIVE) { /* copy results back to user */ lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE); } else { @@ -5023,419 +3643,12 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, out_not_finished: if (processing_queue) { - pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED; + pmbox->mb.mbxStatus = MBX_NOT_FINISHED; lpfc_mbox_cmpl_put(phba, pmbox); } return MBX_NOT_FINISHED; } -/** - * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox - * @phba: Pointer to HBA context object. - * @mboxq: Pointer to mailbox object. - * - * The function posts a mailbox to the port. The mailbox is expected - * to be comletely filled in and ready for the port to operate on it. - * This routine executes a synchronous completion operation on the - * mailbox by polling for its completion. - * - * The caller must not be holding any locks when calling this routine. - * - * Returns: - * MBX_SUCCESS - mailbox posted successfully - * Any of the MBX error values. - **/ -static int -lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) -{ - int rc = MBX_SUCCESS; - unsigned long iflag; - uint32_t db_ready; - uint32_t mcqe_status; - uint32_t mbx_cmnd; - unsigned long timeout; - struct lpfc_sli *psli = &phba->sli; - struct lpfc_mqe *mb = &mboxq->u.mqe; - struct lpfc_bmbx_create *mbox_rgn; - struct dma_address *dma_address; - struct lpfc_register bmbx_reg; - - /* - * Only one mailbox can be active to the bootstrap mailbox region - * at a time and there is no queueing provided. 
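The bootstrap-mailbox path being removed here uses the kernel's standard deadline idiom for a busy-wait that still sleeps: compute an absolute jiffies deadline from the per-command timeout, then poll the doorbell until the ready bit appears or the deadline passes. Its core, as a fragment (the register accessors and bit fields are the driver's own):

        unsigned long timeout;
        uint32_t db_ready;

        timeout = jiffies +
                  msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) * 1000);
        do {
                bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
                db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
                if (!db_ready)
                        msleep(2);              /* sleep, don't spin */
                if (time_after(jiffies, timeout))
                        return MBXERR_ERROR;    /* port never became ready */
        } while (!db_ready);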
- */ - spin_lock_irqsave(&phba->hbalock, iflag); - if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { - spin_unlock_irqrestore(&phba->hbalock, iflag); - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "(%d):2532 Mailbox command x%x (x%x) " - "cannot issue Data: x%x x%x\n", - mboxq->vport ? mboxq->vport->vpi : 0, - mboxq->u.mb.mbxCommand, - lpfc_sli4_mbox_opcode_get(phba, mboxq), - psli->sli_flag, MBX_POLL); - return MBXERR_ERROR; - } - /* The server grabs the token and owns it until release */ - psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; - phba->sli.mbox_active = mboxq; - spin_unlock_irqrestore(&phba->hbalock, iflag); - - /* - * Initialize the bootstrap memory region to avoid stale data areas - * in the mailbox post. Then copy the caller's mailbox contents to - * the bmbx mailbox region. - */ - mbx_cmnd = bf_get(lpfc_mqe_command, mb); - memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create)); - lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt, - sizeof(struct lpfc_mqe)); - - /* Post the high mailbox dma address to the port and wait for ready. */ - dma_address = &phba->sli4_hba.bmbx.dma_address; - writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); - - timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) - * 1000) + jiffies; - do { - bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); - db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); - if (!db_ready) - msleep(2); - - if (time_after(jiffies, timeout)) { - rc = MBXERR_ERROR; - goto exit; - } - } while (!db_ready); - - /* Post the low mailbox dma address to the port. */ - writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); - timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) - * 1000) + jiffies; - do { - bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); - db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg); - if (!db_ready) - msleep(2); - - if (time_after(jiffies, timeout)) { - rc = MBXERR_ERROR; - goto exit; - } - } while (!db_ready); - - /* - * Read the CQ to ensure the mailbox has completed. - * If so, update the mailbox status so that the upper layers - * can complete the request normally. - */ - lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb, - sizeof(struct lpfc_mqe)); - mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt; - lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe, - sizeof(struct lpfc_mcqe)); - mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe); - - /* Prefix the mailbox status with range x4000 to note SLI4 status. */ - if (mcqe_status != MB_CQE_STATUS_SUCCESS) { - bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status); - rc = MBXERR_ERROR; - } - - lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "(%d):0356 Mailbox cmd x%x (x%x) Status x%x " - "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" - " x%x x%x CQ: x%x x%x x%x x%x\n", - mboxq->vport ? 
mboxq->vport->vpi : 0, - mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq), - bf_get(lpfc_mqe_status, mb), - mb->un.mb_words[0], mb->un.mb_words[1], - mb->un.mb_words[2], mb->un.mb_words[3], - mb->un.mb_words[4], mb->un.mb_words[5], - mb->un.mb_words[6], mb->un.mb_words[7], - mb->un.mb_words[8], mb->un.mb_words[9], - mb->un.mb_words[10], mb->un.mb_words[11], - mb->un.mb_words[12], mboxq->mcqe.word0, - mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1, - mboxq->mcqe.trailer); -exit: - /* We are holding the token, no needed for lock when release */ - spin_lock_irqsave(&phba->hbalock, iflag); - psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - phba->sli.mbox_active = NULL; - spin_unlock_irqrestore(&phba->hbalock, iflag); - return rc; -} - -/** - * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware - * @phba: Pointer to HBA context object. - * @pmbox: Pointer to mailbox object. - * @flag: Flag indicating how the mailbox need to be processed. - * - * This function is called by discovery code and HBA management code to submit - * a mailbox command to firmware with SLI-4 interface spec. - * - * Return codes the caller owns the mailbox command after the return of the - * function. - **/ -static int -lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, - uint32_t flag) -{ - struct lpfc_sli *psli = &phba->sli; - unsigned long iflags; - int rc; - - /* Detect polling mode and jump to a handler */ - if (!phba->sli4_hba.intr_enable) { - if (flag == MBX_POLL) - rc = lpfc_sli4_post_sync_mbox(phba, mboxq); - else - rc = -EIO; - if (rc != MBX_SUCCESS) - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "(%d):2541 Mailbox command x%x " - "(x%x) cannot issue Data: x%x x%x\n", - mboxq->vport ? mboxq->vport->vpi : 0, - mboxq->u.mb.mbxCommand, - lpfc_sli4_mbox_opcode_get(phba, mboxq), - psli->sli_flag, flag); - return rc; - } else if (flag == MBX_POLL) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "(%d):2542 Mailbox command x%x (x%x) " - "cannot issue Data: x%x x%x\n", - mboxq->vport ? mboxq->vport->vpi : 0, - mboxq->u.mb.mbxCommand, - lpfc_sli4_mbox_opcode_get(phba, mboxq), - psli->sli_flag, flag); - return -EIO; - } - - /* Now, interrupt mode asynchrous mailbox command */ - rc = lpfc_mbox_cmd_check(phba, mboxq); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "(%d):2543 Mailbox command x%x (x%x) " - "cannot issue Data: x%x x%x\n", - mboxq->vport ? mboxq->vport->vpi : 0, - mboxq->u.mb.mbxCommand, - lpfc_sli4_mbox_opcode_get(phba, mboxq), - psli->sli_flag, flag); - goto out_not_finished; - } - rc = lpfc_mbox_dev_check(phba); - if (unlikely(rc)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "(%d):2544 Mailbox command x%x (x%x) " - "cannot issue Data: x%x x%x\n", - mboxq->vport ? mboxq->vport->vpi : 0, - mboxq->u.mb.mbxCommand, - lpfc_sli4_mbox_opcode_get(phba, mboxq), - psli->sli_flag, flag); - goto out_not_finished; - } - - /* Put the mailbox command to the driver internal FIFO */ - psli->slistat.mbox_busy++; - spin_lock_irqsave(&phba->hbalock, iflags); - lpfc_mbox_put(phba, mboxq); - spin_unlock_irqrestore(&phba->hbalock, iflags); - lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "(%d):0354 Mbox cmd issue - Enqueue Data: " - "x%x (x%x) x%x x%x x%x\n", - mboxq->vport ? 
mboxq->vport->vpi : 0xffffff, - bf_get(lpfc_mqe_command, &mboxq->u.mqe), - lpfc_sli4_mbox_opcode_get(phba, mboxq), - phba->pport->port_state, - psli->sli_flag, MBX_NOWAIT); - /* Wake up worker thread to transport mailbox command from head */ - lpfc_worker_wake_up(phba); - - return MBX_BUSY; - -out_not_finished: - return MBX_NOT_FINISHED; -} - -/** - * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device - * @phba: Pointer to HBA context object. - * - * This function is called by worker thread to send a mailbox command to - * SLI4 HBA firmware. - * - **/ -int -lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) -{ - struct lpfc_sli *psli = &phba->sli; - LPFC_MBOXQ_t *mboxq; - int rc = MBX_SUCCESS; - unsigned long iflags; - struct lpfc_mqe *mqe; - uint32_t mbx_cmnd; - - /* Check interrupt mode before posting async mailbox command */ - if (unlikely(!phba->sli4_hba.intr_enable)) - return MBX_NOT_FINISHED; - - /* Check for mailbox command service token */ - spin_lock_irqsave(&phba->hbalock, iflags); - if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) { - spin_unlock_irqrestore(&phba->hbalock, iflags); - return MBX_NOT_FINISHED; - } - if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { - spin_unlock_irqrestore(&phba->hbalock, iflags); - return MBX_NOT_FINISHED; - } - if (unlikely(phba->sli.mbox_active)) { - spin_unlock_irqrestore(&phba->hbalock, iflags); - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0384 There is pending active mailbox cmd\n"); - return MBX_NOT_FINISHED; - } - /* Take the mailbox command service token */ - psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE; - - /* Get the next mailbox command from head of queue */ - mboxq = lpfc_mbox_get(phba); - - /* If no more mailbox commands are waiting for post, we're done */ - if (!mboxq) { - psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - spin_unlock_irqrestore(&phba->hbalock, iflags); - return MBX_SUCCESS; - } - phba->sli.mbox_active = mboxq; - spin_unlock_irqrestore(&phba->hbalock, iflags); - - /* Check device readiness for posting mailbox command */ - rc = lpfc_mbox_dev_check(phba); - if (unlikely(rc)) - /* Driver clean routine will clean up pending mailbox */ - goto out_not_finished; - - /* Prepare the mbox command to be posted */ - mqe = &mboxq->u.mqe; - mbx_cmnd = bf_get(lpfc_mqe_command, mqe); - - /* Start timer for the mbox_tmo and log some mailbox post messages */ - mod_timer(&psli->mbox_tmo, (jiffies + - (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd)))); - - lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "(%d):0355 Mailbox cmd x%x (x%x) issue Data: " - "x%x x%x\n", - mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, - lpfc_sli4_mbox_opcode_get(phba, mboxq), - phba->pport->port_state, psli->sli_flag); - - if (mbx_cmnd != MBX_HEARTBEAT) { - if (mboxq->vport) { - lpfc_debugfs_disc_trc(mboxq->vport, - LPFC_DISC_TRC_MBOX_VPORT, - "MBOX Send vport: cmd:x%x mb:x%x x%x", - mbx_cmnd, mqe->un.mb_words[0], - mqe->un.mb_words[1]); - } else { - lpfc_debugfs_disc_trc(phba->pport, - LPFC_DISC_TRC_MBOX, - "MBOX Send: cmd:x%x mb:x%x x%x", - mbx_cmnd, mqe->un.mb_words[0], - mqe->un.mb_words[1]); - } - } - psli->slistat.mbox_cmd++; - - /* Post the mailbox command to the port */ - rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); - if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "(%d):2533 Mailbox command x%x (x%x) " - "cannot issue Data: x%x x%x\n", - mboxq->vport ?
mboxq->vport->vpi : 0, - mboxq->u.mb.mbxCommand, - lpfc_sli4_mbox_opcode_get(phba, mboxq), - psli->sli_flag, MBX_NOWAIT); - goto out_not_finished; - } - - return rc; - -out_not_finished: - spin_lock_irqsave(&phba->hbalock, iflags); - mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED; - __lpfc_mbox_cmpl_put(phba, mboxq); - /* Release the token */ - psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - phba->sli.mbox_active = NULL; - spin_unlock_irqrestore(&phba->hbalock, iflags); - - return MBX_NOT_FINISHED; -} - -/** - * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command - * @phba: Pointer to HBA context object. - * @pmbox: Pointer to mailbox object. - * @flag: Flag indicating how the mailbox needs to be processed. - * - * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from - * the API jump table function pointer from the lpfc_hba struct. - * - * Return codes: the caller owns the mailbox command after the - * function returns. - **/ -int -lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag) -{ - return phba->lpfc_sli_issue_mbox(phba, pmbox, flag); -} - -/** - * lpfc_mbox_api_table_setup - Set up mbox api function jump table - * @phba: The hba struct for which this call is being executed. - * @dev_grp: The HBA PCI-Device group number. - * - * This routine sets up the mbox interface API function jump table in @phba - * struct. - * Returns: 0 - success, -ENODEV - failure. - **/ -int -lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) -{ - - switch (dev_grp) { - case LPFC_PCI_DEV_LP: - phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3; - phba->lpfc_sli_handle_slow_ring_event = - lpfc_sli_handle_slow_ring_event_s3; - phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3; - phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; - phba->lpfc_sli_brdready = lpfc_sli_brdready_s3; - break; - case LPFC_PCI_DEV_OC: - phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4; - phba->lpfc_sli_handle_slow_ring_event = - lpfc_sli_handle_slow_ring_event_s4; - phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4; - phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4; - phba->lpfc_sli_brdready = lpfc_sli_brdready_s4; - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1420 Invalid HBA PCI-device group: 0x%x\n", - dev_grp); - return -ENODEV; - break; - } - return 0; -} - /** * __lpfc_sli_ringtx_put - Add an iocb to the txq * @phba: Pointer to HBA context object. @@ -5488,34 +3701,35 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } /** - * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb + * __lpfc_sli_issue_iocb - Lockless version of lpfc_sli_issue_iocb * @phba: Pointer to HBA context object. - * @ring_number: SLI ring number to issue iocb on. + * @pring: Pointer to driver SLI ring object. * @piocb: Pointer to command iocb. * @flag: Flag indicating if this command can be put into txq. * - * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue - * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is - * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT - * flag is turned on, the function returns IOCB_ERROR. When the link is down, - * this function allows only iocbs for posting buffers. This function finds - * next available slot in the command ring and posts the command to the - * available slot and writes the port attention register to request HBA start - * processing new iocb.
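Editorial aside: lpfc_mbox_api_table_setup() just above is a per-device-group jump table, where the SLI-3 and SLI-4 back-ends are chosen once at attach time and every later call goes through the stored function pointers. The following is a minimal, self-contained sketch of that pattern only; all names in it are hypothetical and not part of the driver or this patch.

#include <linux/errno.h>

/* Hypothetical reduction of the jump-table setup used above. */
struct hba_api {
	int (*issue_mbox)(void *hba);	/* back-end selected at attach time */
};

static int issue_mbox_s3(void *hba) { return 0; }	/* SLI-3 back-end */
static int issue_mbox_s4(void *hba) { return 0; }	/* SLI-4 back-end */

static int api_table_setup(struct hba_api *api, unsigned int dev_grp)
{
	switch (dev_grp) {
	case 0:				/* stands in for LPFC_PCI_DEV_LP */
		api->issue_mbox = issue_mbox_s3;
		break;
	case 1:				/* stands in for LPFC_PCI_DEV_OC */
		api->issue_mbox = issue_mbox_s4;
		break;
	default:
		return -ENODEV;		/* unknown device group */
	}
	return 0;
}

Every caller then simply invokes api->issue_mbox(hba) and never needs to know which interface spec the adapter implements.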
If there is no slot available in the ring and - * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise - * the function returns IOCB_BUSY. - * - * This function is called with hbalock held. The function will return success - * after it successfully submits the iocb to firmware or after adding it to - * the txq. + * __lpfc_sli_issue_iocb is used by other functions in the driver + * to issue an iocb command to the HBA. If the PCI slot is recovering + * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT + * flag is turned on, the function returns IOCB_ERROR. + * When the link is down, this function allows only iocbs for + * posting buffers. + * This function finds next available slot in the command ring and + * posts the command to the available slot and writes the port + * attention register to request HBA start processing new iocb. + * If there is no slot available in the ring and + * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the + * txq, otherwise the function returns IOCB_BUSY. + * + * This function is called with hbalock held. + * The function will return success after it successfully submits the + * iocb to firmware or after adding it to the txq. **/ static int -__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, +__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb, uint32_t flag) { struct lpfc_iocbq *nextiocb; IOCB_t *iocb; - struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; if (piocb->iocb_cmpl && (!piocb->vport) && (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && @@ -5619,571 +3833,79 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, return IOCB_BUSY; } -/** - * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl. - * @phba: Pointer to HBA context object. - * @piocb: Pointer to command iocb. - * @sglq: Pointer to the scatter gather queue object. - * - * This routine converts the bpl or bde that is in the IOCB - * to a sgl list for the sli4 hardware. The physical address - * of the bpl/bde is converted back to a virtual address. - * If the IOCB contains a BPL then the list of BDE's is - * converted to sli4_sge's. If the IOCB contains a single - * BDE then it is converted to a single sli_sge. - * The IOCB is still in cpu endianness so the contents of - * the bpl can be used without byte swapping. - * - * Returns valid XRI = Success, NO_XRI = Failure. -**/ -static uint16_t -lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, - struct lpfc_sglq *sglq) -{ - uint16_t xritag = NO_XRI; - struct ulp_bde64 *bpl = NULL; - struct ulp_bde64 bde; - struct sli4_sge *sgl = NULL; - IOCB_t *icmd; - int numBdes = 0; - int i = 0; - - if (!piocbq || !sglq) - return xritag; - - sgl = (struct sli4_sge *)sglq->sgl; - icmd = &piocbq->iocb; - if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { - numBdes = icmd->un.genreq64.bdl.bdeSize / - sizeof(struct ulp_bde64); - /* The addrHigh and addrLow fields within the IOCB - * have not been byteswapped yet so there is no - * need to swap them back. - */ - bpl = (struct ulp_bde64 *) - ((struct lpfc_dmabuf *)piocbq->context3)->virt; - - if (!bpl) - return xritag; - - for (i = 0; i < numBdes; i++) { - /* Should already be byte swapped. */ - sgl->addr_hi = bpl->addrHigh; - sgl->addr_lo = bpl->addrLow; - /* swap the size field back to the cpu so we - * can assign it to the sgl.
- */ - bde.tus.w = le32_to_cpu(bpl->tus.w); - bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize); - if ((i+1) == numBdes) - bf_set(lpfc_sli4_sge_last, sgl, 1); - else - bf_set(lpfc_sli4_sge_last, sgl, 0); - sgl->word2 = cpu_to_le32(sgl->word2); - sgl->word3 = cpu_to_le32(sgl->word3); - bpl++; - sgl++; - } - } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) { - /* The addrHigh and addrLow fields of the BDE have not - * been byteswapped yet so they need to be swapped - * before putting them in the sgl. - */ - sgl->addr_hi = - cpu_to_le32(icmd->un.genreq64.bdl.addrHigh); - sgl->addr_lo = - cpu_to_le32(icmd->un.genreq64.bdl.addrLow); - bf_set(lpfc_sli4_sge_len, sgl, - icmd->un.genreq64.bdl.bdeSize); - bf_set(lpfc_sli4_sge_last, sgl, 1); - sgl->word2 = cpu_to_le32(sgl->word2); - sgl->word3 = cpu_to_le32(sgl->word3); - } - return sglq->sli4_xritag; -} /** - * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution + * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. * @piocb: Pointer to command iocb. + * @flag: Flag indicating if this command can be put into txq. * - * This routine performs round-robin distribution of SCSI commands to the - * SLI4 FCP WQ indexes. - * - * Return: index of the SLI4 fast-path FCP queue to use. + * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb + * function. This function gets the hbalock and calls + * __lpfc_sli_issue_iocb function and will return the error returned + * by __lpfc_sli_issue_iocb function. This wrapper is used by + * functions which do not hold hbalock. **/ -static uint32_t -lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) +int +lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *piocb, uint32_t flag) { - static uint32_t fcp_qidx; + unsigned long iflags; + int rc; + + spin_lock_irqsave(&phba->hbalock, iflags); + rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag); + spin_unlock_irqrestore(&phba->hbalock, iflags); - return fcp_qidx++ % phba->cfg_fcp_wq_count; + return rc; } /** - * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry. + * lpfc_extra_ring_setup - Extra ring setup function * @phba: Pointer to HBA context object. - * @piocb: Pointer to command iocb. - * @wqe: Pointer to the work queue entry. * - * This routine converts the iocb command to its Work Queue Entry - * equivalent. The wqe pointer should not have any fields set when - * this routine is called because it will memcpy over them. - * This routine does not set the CQ_ID or the WQEC bits in the - * wqe. + * This function is called while driver attaches with the + * HBA to setup the extra ring. The extra ring is used + * only when driver needs to support target mode functionality + * or IP over FC functionalities. * - * Returns: 0 = Success, IOCB_ERROR = Failure. + * This function is called with no lock held.
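Editorial aside: lpfc_sli4_scmd_to_wqidx_distr() above spreads FCP commands across the configured work queues with nothing more than a static counter and a modulo. A stripped-down sketch of the same idea follows; the names are hypothetical, and, like the original, the counter is deliberately unsynchronized because any in-range index is acceptable (wq_count is assumed to be non-zero).

/*
 * Illustrative sketch only: round-robin queue-index distribution
 * with a static counter, as in lpfc_sli4_scmd_to_wqidx_distr().
 */
static unsigned int next_wq_index(unsigned int wq_count)
{
	static unsigned int qidx;

	/* A racy increment is harmless here: a stale or skipped
	 * value still yields a valid index after the modulo. */
	return qidx++ % wq_count;
}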
**/ static int -lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, - union lpfc_wqe *wqe) +lpfc_extra_ring_setup( struct lpfc_hba *phba) { - uint32_t payload_len = 0; - uint8_t ct = 0; - uint32_t fip; - uint32_t abort_tag; - uint8_t command_type = ELS_COMMAND_NON_FIP; - uint8_t cmnd; - uint16_t xritag; - struct ulp_bde64 *bpl = NULL; - - fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags); - /* The fcp commands will set command type */ - if ((!(iocbq->iocb_flag & LPFC_IO_FCP)) && (!fip)) - command_type = ELS_COMMAND_NON_FIP; - else if (!(iocbq->iocb_flag & LPFC_IO_FCP)) - command_type = ELS_COMMAND_FIP; - else if (iocbq->iocb_flag & LPFC_IO_FCP) - command_type = FCP_COMMAND; - else { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2019 Invalid cmd 0x%x\n", - iocbq->iocb.ulpCommand); - return IOCB_ERROR; - } - /* Some of the fields are in the right position already */ - memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); - abort_tag = (uint32_t) iocbq->iotag; - xritag = iocbq->sli4_xritag; - wqe->words[7] = 0; /* The ct field has moved so reset */ - /* words0-2 bpl convert bde */ - if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { - bpl = (struct ulp_bde64 *) - ((struct lpfc_dmabuf *)iocbq->context3)->virt; - if (!bpl) - return IOCB_ERROR; - - /* Should already be byte swapped. */ - wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh); - wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow); - /* swap the size field back to the cpu so we - * can assign it to the sgl. - */ - wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w); - payload_len = wqe->generic.bde.tus.f.bdeSize; - } else - payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize; + struct lpfc_sli *psli; + struct lpfc_sli_ring *pring; - iocbq->iocb.ulpIoTag = iocbq->iotag; - cmnd = iocbq->iocb.ulpCommand; + psli = &phba->sli; - switch (iocbq->iocb.ulpCommand) { - case CMD_ELS_REQUEST64_CR: - if (!iocbq->iocb.ulpLe) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2007 Only Limited Edition cmd Format" - " supported 0x%x\n", - iocbq->iocb.ulpCommand); - return IOCB_ERROR; - } - wqe->els_req.payload_len = payload_len; - /* Els_request64 has a TMO */ - bf_set(wqe_tmo, &wqe->els_req.wqe_com, - iocbq->iocb.ulpTimeout); - /* Need a VF for word 4; set the vf bit */ - bf_set(els_req64_vf, &wqe->els_req, 0); - /* And a VFID for word 12 */ - bf_set(els_req64_vfid, &wqe->els_req, 0); - /* - * Set ct field to 3, which indicates that the context_tag field - * contains the FCFI and remote N_Port_ID is - * in word 5.
- */ + /* Adjust cmd/rsp ring iocb entries more evenly */ - ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); - bf_set(lpfc_wqe_gen_context, &wqe->generic, - iocbq->iocb.ulpContext); + /* Take some away from the FCP ring */ + pring = &psli->ring[psli->fcp_ring]; + pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; + pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; + pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; + pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; - if (iocbq->vport->fc_myDID != 0) { - bf_set(els_req64_sid, &wqe->els_req, - iocbq->vport->fc_myDID); - bf_set(els_req64_sp, &wqe->els_req, 1); - } + /* and give them to the extra ring */ + pring = &psli->ring[psli->extra_ring]; + + pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; + pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; + pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; + pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; - bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); - bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); - /* CCP CCPE PV PRI in word10 were set in the memcpy */ - break; - case CMD_XMIT_SEQUENCE64_CR: - /* word3 iocb=io_tag32 wqe=payload_offset */ - /* payload offset used for multiple outstanding - * sequences on the same exchange - */ - wqe->words[3] = 0; - /* word4 relative_offset memcpy */ - /* word5 r_ctl/df_ctl memcpy */ - bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); - wqe->xmit_sequence.xmit_len = payload_len; - break; - case CMD_XMIT_BCAST64_CN: - /* word3 iocb=iotag32 wqe=payload_len */ - wqe->words[3] = 0; /* no definition for this in wqe */ - /* word4 iocb=rsvd wqe=rsvd */ - /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ - /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ - bf_set(lpfc_wqe_gen_ct, &wqe->generic, - ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); - break; - case CMD_FCP_IWRITE64_CR: - command_type = FCP_COMMAND_DATA_OUT; - /* The struct for wqe fcp_iwrite has 3 fields that are somewhat - * confusing. - * word3 is payload_len: byte offset to the sgl entry for the - * fcp_command. - * word4 is total xfer len, same as the IOCB->ulpParameter. - * word5 is initial xfer len 0 = wait for xfer-ready - */ + /* Always wait for xfer-ready before sending data */ - wqe->fcp_iwrite.initial_xfer_len = 0; - /* word 4 (xfer length) should have been set on the memcpy */ + /* allow write to fall through to read */ - case CMD_FCP_IREAD64_CR: - /* FCP_CMD is always the 1st sgl entry */ - wqe->fcp_iread.payload_len = - payload_len + sizeof(struct fcp_rsp); - - /* word 4 (xfer length) should have been set on the memcpy */ - - bf_set(lpfc_wqe_gen_erp, &wqe->generic, - iocbq->iocb.ulpFCP2Rcvy); - bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS); - /* The XC bit and the XS bit are similar. The driver never - * tracked whether or not the exchange was previously open. - * XC = Exchange create, 0 is create. 1 is already open. - * XS = link cmd: 1 do not close the exchange after command. - * XS = 0 close exchange when command completes. - * The only time we would not set the XC bit is when the XS bit - * is set and we are sending our 2nd or greater command on - * this exchange. - */ - - /* ALLOW read & write to fall through to ICMD64 */ - case CMD_FCP_ICMND64_CR: - /* Always open the exchange */ - bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); - - wqe->words[10] &= 0xffff0000; /* zero out ebde count */ - bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); - break; - case CMD_GEN_REQUEST64_CR: - /* word3 command length is described as byte offset to the - * rsp_data.
Would always be 16, sizeof(struct sli4_sge) - * sgl[0] = cmnd - * sgl[1] = rsp. - * - */ - wqe->gen_req.command_len = payload_len; - /* Word4 parameter copied in the memcpy */ - /* Word5 [rctl, type, df_ctl, la] copied in memcpy */ - /* word6 context tag copied in memcpy */ - if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { - ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2015 Invalid CT %x command 0x%x\n", - ct, iocbq->iocb.ulpCommand); - return IOCB_ERROR; - } - bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0); - bf_set(wqe_tmo, &wqe->gen_req.wqe_com, - iocbq->iocb.ulpTimeout); - - bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); - command_type = OTHER_COMMAND; - break; - case CMD_XMIT_ELS_RSP64_CX: - /* words0-2 BDE memcpy */ - /* word3 iocb=iotag32 wqe=rsvd */ - wqe->words[3] = 0; - /* word4 iocb=did wqe=rsvd. */ - wqe->words[4] = 0; - /* word5 iocb=rsvd wqe=did */ - bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, - iocbq->iocb.un.elsreq64.remoteID); - - bf_set(lpfc_wqe_gen_ct, &wqe->generic, - ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); - - bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); - bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); - if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) - bf_set(lpfc_wqe_gen_context, &wqe->generic, - iocbq->vport->vpi + phba->vpi_base); - command_type = OTHER_COMMAND; - break; - case CMD_CLOSE_XRI_CN: - case CMD_ABORT_XRI_CN: - case CMD_ABORT_XRI_CX: - /* words 0-2 memcpy should be 0 reserved */ - /* port will send abts */ - if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) - /* - * The link is down so the fw does not need to send abts - * on the wire. - */ - bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); - else - bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); - bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); - abort_tag = iocbq->iocb.un.acxri.abortIoTag; - wqe->words[5] = 0; - bf_set(lpfc_wqe_gen_ct, &wqe->generic, - ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); - abort_tag = iocbq->iocb.un.acxri.abortIoTag; - wqe->generic.abort_tag = abort_tag; - /* - * The abort handler will send us CMD_ABORT_XRI_CN or - * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX - */ - bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX); - cmnd = CMD_ABORT_XRI_CX; - command_type = OTHER_COMMAND; - xritag = 0; - break; - case CMD_XRI_ABORTED_CX: - case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ - /* words0-2 are all 0's no bde */ - /* word3 and word4 are reserved */ - wqe->words[3] = 0; - wqe->words[4] = 0; - /* word5 iocb=rsvd wqe=did */ - /* There is no remote port id in the IOCB?
*/ - /* Let this fall through and fail */ - case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ - case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ - case CMD_FCP_TRSP64_CX: /* Target mode rcv */ - case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */ - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2014 Invalid command 0x%x\n", - iocbq->iocb.ulpCommand); - return IOCB_ERROR; - break; - - } - bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag); - bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag); - wqe->generic.abort_tag = abort_tag; - bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type); - bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd); - bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass); - bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT); - - return 0; -} - -/** - * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb - * @phba: Pointer to HBA context object. - * @ring_number: SLI ring number to issue iocb on. - * @piocb: Pointer to command iocb. - * @flag: Flag indicating if this command can be put into txq. - * - * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue - * an iocb command to an HBA with SLI-4 interface spec. - * - * This function is called with hbalock held. The function will return success - * after it successfully submits the iocb to firmware or after adding it to - * the txq. - **/ -static int -__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, - struct lpfc_iocbq *piocb, uint32_t flag) -{ - struct lpfc_sglq *sglq; - uint16_t xritag; - union lpfc_wqe wqe; - struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number]; - uint32_t fcp_wqidx; - - if (piocb->sli4_xritag == NO_XRI) { - if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || - piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN) - sglq = NULL; - else { - sglq = __lpfc_sli_get_sglq(phba); - if (!sglq) - return IOCB_ERROR; - piocb->sli4_xritag = sglq->sli4_xritag; - } - } else if (piocb->iocb_flag & LPFC_IO_FCP) { - sglq = NULL; /* These IO's already have an XRI and - * a mapped sgl. - */ - } else { - /* This is a continuation of a command (CX), so this - * sglq is on the active list - */ - sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag); - if (!sglq) - return IOCB_ERROR; - } - - if (sglq) { - xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq); - if (xritag != sglq->sli4_xritag) - return IOCB_ERROR; - } - - if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe)) - return IOCB_ERROR; - - if (piocb->iocb_flag & LPFC_IO_FCP) { - fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb); - if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe)) - return IOCB_ERROR; - } else { - if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) - return IOCB_ERROR; - } - lpfc_sli_ringtxcmpl_put(phba, pring, piocb); - - return 0; -} - -/** - * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb - * - * This routine wraps the actual lockless version for issuing IOCB function - * pointer from the lpfc_hba struct. - * - * Return codes: - * IOCB_ERROR - Error - * IOCB_SUCCESS - Success - * IOCB_BUSY - Busy - **/ -static inline int -__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, - struct lpfc_iocbq *piocb, uint32_t flag) -{ - return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); -} - -/** - * lpfc_sli_api_table_setup - Set up sli api function jump table - * @phba: The hba struct for which this call is being executed. - * @dev_grp: The HBA PCI-Device group number.
- * - * This routine sets up the SLI interface API function jump table in @phba - * struct. - * Returns: 0 - success, -ENODEV - failure. - **/ -int -lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) -{ - - switch (dev_grp) { - case LPFC_PCI_DEV_LP: - phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3; - phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3; - break; - case LPFC_PCI_DEV_OC: - phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4; - phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4; - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1419 Invalid HBA PCI-device group: 0x%x\n", - dev_grp); - return -ENODEV; - break; - } - phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq; - return 0; -} - -/** - * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb - * @phba: Pointer to HBA context object. - * @pring: Pointer to driver SLI ring object. - * @piocb: Pointer to command iocb. - * @flag: Flag indicating if this command can be put into txq. - * - * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb - * function. This function gets the hbalock and calls - * __lpfc_sli_issue_iocb function and will return the error returned - * by __lpfc_sli_issue_iocb function. This wrapper is used by - * functions which do not hold hbalock. - **/ -int -lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, - struct lpfc_iocbq *piocb, uint32_t flag) -{ - unsigned long iflags; - int rc; - - spin_lock_irqsave(&phba->hbalock, iflags); - rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); - spin_unlock_irqrestore(&phba->hbalock, iflags); - - return rc; -} - -/** - * lpfc_extra_ring_setup - Extra ring setup function - * @phba: Pointer to HBA context object. - * - * This function is called while driver attaches with the - * HBA to setup the extra ring. The extra ring is used - * only when driver needs to support target mode functionality - * or IP over FC functionalities. - * - * This function is called with no lock held. 
- **/ -static int -lpfc_extra_ring_setup( struct lpfc_hba *phba) -{ - struct lpfc_sli *psli; - struct lpfc_sli_ring *pring; - - psli = &phba->sli; - - /* Adjust cmd/rsp ring iocb entries more evenly */ - - /* Take some away from the FCP ring */ - pring = &psli->ring[psli->fcp_ring]; - pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES; - pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES; - pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES; - pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES; - - /* and give them to the extra ring */ - pring = &psli->ring[psli->extra_ring]; - - pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES; - pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES; - pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES; - pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES; - - /* Setup default profile for this ring */ - pring->iotag_max = 4096; - pring->num_mask = 1; - pring->prt[0].profile = 0; /* Mask 0 */ - pring->prt[0].rctl = phba->cfg_multi_ring_rctl; - pring->prt[0].type = phba->cfg_multi_ring_type; - pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; - return 0; -} + /* Setup default profile for this ring */ + pring->iotag_max = 4096; + pring->num_mask = 1; + pring->prt[0].profile = 0; /* Mask 0 */ + pring->prt[0].rctl = phba->cfg_multi_ring_rctl; + pring->prt[0].type = phba->cfg_multi_ring_type; + pring->prt[0].lpfc_sli_rcv_unsol_event = NULL; + return 0; +} /** * lpfc_sli_async_event_handler - ASYNC iocb handler function @@ -6425,52 +4147,6 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba) return 1; } -/** - * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system - * @phba: Pointer to HBA context object. - * - * This routine flushes the mailbox command subsystem. It will unconditionally - * flush all the mailbox commands in the three possible stages in the mailbox - * command sub-system: pending mailbox command queue; the outstanding mailbox - * command; and completed mailbox command queue. It is the caller's responsibility - * to make sure that the driver is in the proper state to flush the mailbox - * command sub-system. Namely, the posting of mailbox commands into the - * pending mailbox command queue from the various clients must be stopped; - * either the HBA is in a state that it will never work on the outstanding - * mailbox command (such as in EEH or ERATT conditions) or the outstanding - * mailbox command has been completed. - **/ -static void -lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba) -{ - LIST_HEAD(completions); - struct lpfc_sli *psli = &phba->sli; - LPFC_MBOXQ_t *pmb; - unsigned long iflag; - - /* Flush all the mailbox commands in the mbox system */ - spin_lock_irqsave(&phba->hbalock, iflag); - /* The pending mailbox command queue */ - list_splice_init(&phba->sli.mboxq, &completions); - /* The outstanding active mailbox command */ - if (psli->mbox_active) { - list_add_tail(&psli->mbox_active->list, &completions); - psli->mbox_active = NULL; - psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - } - /* The completed mailbox command queue */ - list_splice_init(&phba->sli.mboxq_cmpl, &completions); - spin_unlock_irqrestore(&phba->hbalock, iflag); - - /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */ - while (!list_empty(&completions)) { - list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); - pmb->u.mb.mbxStatus = MBX_NOT_FINISHED; - if (pmb->mbox_cmpl) - pmb->mbox_cmpl(phba, pmb); - } -} - /** * lpfc_sli_host_down - Vport cleanup function * @vport: Pointer to virtual port object.
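Editorial aside: lpfc_sli_mbox_sys_flush() above uses a common kernel idiom worth calling out: all three staging lists are spliced onto a private list during a single lock hold, and the completion callbacks then run with the lock dropped. A generic sketch of that idiom follows; the types and names are hypothetical, not from the driver.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical command carrying a completion callback. */
struct cmd {
	struct list_head list;
	void (*done)(struct cmd *cmd);
};

/*
 * Illustrative sketch only: take everything off the pending list
 * under the lock, then complete each entry with the lock dropped.
 */
static void flush_pending(spinlock_t *lock, struct list_head *pending)
{
	LIST_HEAD(completions);
	struct cmd *cmd, *next;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_splice_init(pending, &completions);
	spin_unlock_irqrestore(lock, flags);

	list_for_each_entry_safe(cmd, next, &completions, list) {
		list_del(&cmd->list);
		if (cmd->done)
			cmd->done(cmd);	/* callbacks run without the lock */
	}
}

Running the callbacks outside the lock matters because a completion handler may itself try to take the same lock or sleep.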
@@ -6564,11 +4240,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) struct lpfc_sli *psli = &phba->sli; struct lpfc_sli_ring *pring; struct lpfc_dmabuf *buf_ptr; - unsigned long flags = 0; + LPFC_MBOXQ_t *pmb; int i; - - /* Shutdown the mailbox command sub-system */ - lpfc_sli_mbox_sys_shutdown(phba); + unsigned long flags = 0; lpfc_hba_down_prep(phba); @@ -6613,42 +4287,28 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) /* Return any active mbox cmds */ del_timer_sync(&psli->mbox_tmo); + spin_lock_irqsave(&phba->hbalock, flags); - spin_lock_irqsave(&phba->pport->work_port_lock, flags); + spin_lock(&phba->pport->work_port_lock); phba->pport->work_port_events &= ~WORKER_MBOX_TMO; - spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); - - return 1; -} - -/** - * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA - * @phba: Pointer to HBA context object. - * - * This function cleans up all queues, iocb, buffers, mailbox commands while - * shutting down the SLI4 HBA FCoE function. This function is called with no - * lock held and always returns 1. - * - * This function does the following to cleanup driver FCoE function resources: - * - Free discovery resources for each virtual port - * - Cleanup any pending fabric iocbs - * - Iterate through the iocb txq and free each entry in the list. - * - Free up any buffer posted to the HBA. - * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc. - * - Free mailbox commands in the mailbox queue. - **/ -int -lpfc_sli4_hba_down(struct lpfc_hba *phba) -{ - /* Stop the SLI4 device port */ - lpfc_stop_port(phba); + spin_unlock(&phba->pport->work_port_lock); - /* Tear down the queues in the HBA */ - lpfc_sli4_queue_unset(phba); - - /* unregister default FCFI from the HBA */ - lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi); + /* Return any pending or completed mbox cmds */ + list_splice_init(&phba->sli.mboxq, &completions); + if (psli->mbox_active) { + list_add_tail(&psli->mbox_active->list, &completions); + psli->mbox_active = NULL; + psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; + } + list_splice_init(&phba->sli.mboxq_cmpl, &completions); + spin_unlock_irqrestore(&phba->hbalock, flags); + while (!list_empty(&completions)) { + list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list); + pmb->mb.mbxStatus = MBX_NOT_FINISHED; + if (pmb->mbox_cmpl) + pmb->mbox_cmpl(phba,pmb); + } return 1; } @@ -6979,10 +4639,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, iabt = &abtsiocbp->iocb; iabt->un.acxri.abortType = ABORT_TYPE_ABTS; iabt->un.acxri.abortContextTag = icmd->ulpContext; - if (phba->sli_rev == LPFC_SLI_REV4) - iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; - else - iabt->un.acxri.abortIoTag = icmd->ulpIoTag; + iabt->un.acxri.abortIoTag = icmd->ulpIoTag; iabt->ulpLe = 1; iabt->ulpClass = icmd->ulpClass; @@ -6998,7 +4655,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, "abort cmd iotag x%x\n", iabt->un.acxri.abortContextTag, iabt->un.acxri.abortIoTag, abtsiocbp->iotag); - retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0); + retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0); if (retval) __lpfc_sli_release_iocbq(phba, abtsiocbp); @@ -7181,10 +4838,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, cmd = &iocbq->iocb; abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; - if (phba->sli_rev == LPFC_SLI_REV4) - abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag; - 
else - abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; + abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; abtsiocb->iocb.ulpLe = 1; abtsiocb->iocb.ulpClass = cmd->ulpClass; abtsiocb->vport = phba->pport; @@ -7196,8 +4850,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, /* Setup callback routine and issue the command. */ abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; - ret_val = lpfc_sli_issue_iocb(phba, pring->ringno, - abtsiocb, 0); + ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0); if (ret_val == IOCB_ERROR) { lpfc_sli_release_iocbq(phba, abtsiocb); errcnt++; @@ -7278,7 +4931,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, **/ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, - uint32_t ring_number, + struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb, struct lpfc_iocbq *prspiocbq, uint32_t timeout) @@ -7309,7 +4962,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, readl(phba->HCregaddr); /* flush */ } - retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0); + retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0); if (retval == IOCB_SUCCESS) { timeout_req = timeout * HZ; timeleft = wait_event_timeout(done_q, @@ -7424,165 +5077,66 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, } /** - * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system + * lpfc_sli_flush_mbox_queue - mailbox queue cleanup function * @phba: Pointer to HBA context. * - * This function is called to shut down the driver's mailbox sub-system. - * It first marks the mailbox sub-system as being in a blocked state to prevent - * the asynchronous mailbox command from being issued off the pending mailbox - * command queue. If the mailbox command sub-system shutdown is due to - * HBA error conditions such as EEH or ERATT, this routine shall invoke - * the mailbox sub-system flush routine to forcefully bring down the - * mailbox sub-system. Otherwise, if it is due to a normal condition (such - * as with offline or HBA function reset), this routine will wait for the - * outstanding mailbox command to complete before invoking the mailbox - * sub-system flush routine to gracefully bring down mailbox sub-system. + * This function is called to clean up any pending mailbox + * objects in the driver queue before bringing the HBA offline. + * This function is called while resetting the HBA. + * The function is called without any lock held. The function + * takes hbalock to update SLI data structure. + * This function returns 1 when there is an active mailbox + * command pending, else returns 0. **/ -void -lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) +int +lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba) { - struct lpfc_sli *psli = &phba->sli; - uint8_t actcmd = MBX_HEARTBEAT; - unsigned long timeout; + struct lpfc_vport *vport = phba->pport; + int i = 0; + uint32_t ha_copy; - spin_lock_irq(&phba->hbalock); - psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; - spin_unlock_irq(&phba->hbalock); + while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) { + if (i++ > LPFC_MBOX_TMO * 1000) + return 1; - if (psli->sli_flag & LPFC_SLI_ACTIVE) { + /* + * Call lpfc_sli_handle_mb_event only if a mailbox cmd + * did finish. This way we won't get the misleading + * "Stray Mailbox Interrupt" message.
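Editorial aside: the shutdown path described above waits for the outstanding command with a bounded poll, the same wait-or-deadline shape as the bootstrap-mailbox doorbell loop earlier in this diff. A generic sketch of the pattern follows, with hypothetical names; the caller falls back to a forced flush when the wait fails.

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/*
 * Illustrative sketch only: poll a busy predicate until it clears
 * or a deadline passes.
 */
static bool wait_until_idle(bool (*busy)(void *ctx), void *ctx,
			    unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (busy(ctx)) {
		if (time_after(jiffies, deadline))
			return false;	/* gave up; caller forces the flush */
		msleep(2);		/* re-check every 2ms, as above */
	}
	return true;
}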
+ */ spin_lock_irq(&phba->hbalock); - if (phba->sli.mbox_active) - actcmd = phba->sli.mbox_active->u.mb.mbxCommand; + ha_copy = phba->work_ha; + phba->work_ha &= ~HA_MBATT; spin_unlock_irq(&phba->hbalock); - /* Determine how long we might wait for the active mailbox - * command to be gracefully completed by firmware. - */ - timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * - 1000) + jiffies; - while (phba->sli.mbox_active) { - /* Check active mailbox complete status every 2ms */ - msleep(2); - if (time_after(jiffies, timeout)) - /* Timeout; let the mailbox flush routine - * forcefully release the active mailbox command - */ - break; - } + + if (ha_copy & HA_MBATT) + if (lpfc_sli_handle_mb_event(phba) == 0) + i = 0; + + msleep(1); } - lpfc_sli_mbox_sys_flush(phba); + + return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0; } /** - * lpfc_sli_eratt_read - read sli-3 error attention events + * lpfc_sli_check_eratt - check error attention events * @phba: Pointer to HBA context. * - * This function is called to read the SLI3 device error attention registers - * for possible error attention events. The caller must hold the hostlock - * with spin_lock_irq(). + * This function is called from timer soft interrupt context to check HBA's + * error attention register bit for error attention events. * * This function returns 1 when there is Error Attention in the Host Attention * Register and returns 0 otherwise. **/ -static int -lpfc_sli_eratt_read(struct lpfc_hba *phba) +int +lpfc_sli_check_eratt(struct lpfc_hba *phba) { uint32_t ha_copy; - /* Read chip Host Attention (HA) register */ - ha_copy = readl(phba->HAregaddr); - if (ha_copy & HA_ERATT) { - /* Read host status register to retrieve error event */ - lpfc_sli_read_hs(phba); - - /* Check if a deferred error condition is active */ - if ((HS_FFER1 & phba->work_hs) && - ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 | - HS_FFER6 | HS_FFER7) & phba->work_hs)) { - spin_lock_irq(&phba->hbalock); - phba->hba_flag |= DEFER_ERATT; - spin_unlock_irq(&phba->hbalock); - /* Clear all interrupt enable conditions */ - writel(0, phba->HCregaddr); - readl(phba->HCregaddr); - } - - /* Set the driver HA work bitmap */ - spin_lock_irq(&phba->hbalock); - phba->work_ha |= HA_ERATT; - /* Indicate polling handles this ERATT */ - phba->hba_flag |= HBA_ERATT_HANDLED; - spin_unlock_irq(&phba->hbalock); - return 1; - } - return 0; -} - -/** - * lpfc_sli4_eratt_read - read sli-4 error attention events - * @phba: Pointer to HBA context. - * - * This function is called to read the SLI4 device error attention registers - * for possible error attention events. The caller must hold the hostlock - * with spin_lock_irq(). - * - * This function returns 1 when there is Error Attention in the Host Attention - * Register and returns 0 otherwise. - **/ -static int -lpfc_sli4_eratt_read(struct lpfc_hba *phba) -{ - uint32_t uerr_sta_hi, uerr_sta_lo; - uint32_t onlnreg0, onlnreg1; - - /* For now, use the SLI4 device internal unrecoverable error - * registers for error attention. This can be changed later.
- */ - onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr); - onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr); - if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) { - uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr); - uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr); - if (uerr_sta_lo || uerr_sta_hi) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1423 HBA Unrecoverable error: " - "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, " - "online0_reg=0x%x, online1_reg=0x%x\n", - uerr_sta_lo, uerr_sta_hi, - onlnreg0, onlnreg1); - /* TEMP: as the driver error recovery logic is not - * fully developed, we just log the error message - * and the device error attention action is now - * temporarily disabled. - */ - return 0; - phba->work_status[0] = uerr_sta_lo; - phba->work_status[1] = uerr_sta_hi; - spin_lock_irq(&phba->hbalock); - /* Set the driver HA work bitmap */ - phba->work_ha |= HA_ERATT; - /* Indicate polling handles this ERATT */ - phba->hba_flag |= HBA_ERATT_HANDLED; - spin_unlock_irq(&phba->hbalock); - return 1; - } - } - return 0; -} - -/** - * lpfc_sli_check_eratt - check error attention events - * @phba: Pointer to HBA context. - * - * This function is called from timer soft interrupt context to check HBA's - * error attention register bit for error attention events. - * - * This function returns 1 when there is Error Attention in the Host Attention - * Register and returns 0 otherwise. - **/ -int -lpfc_sli_check_eratt(struct lpfc_hba *phba) -{ - uint32_t ha_copy; + /* If PCI channel is offline, don't process it */ + if (unlikely(pci_channel_offline(phba->pcidev))) + return 0; /* If somebody is waiting to handle an eratt, don't process it * here. The brdkill function will do this. @@ -7607,84 +5161,56 @@ lpfc_sli_check_eratt(struct lpfc_hba *phba) return 0; }
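Editorial aside: both eratt readers in this hunk end the same way; once an error is seen in the hardware registers, it is latched into the work_ha bitmap and hba_flag under the lock so the worker thread and the polling path agree on who owns the event. A reduced sketch of that latch step follows; the names and bit values are hypothetical stand-ins.

#include <linux/spinlock.h>
#include <linux/types.h>

#define WORK_ERATT	0x1	/* hypothetical stand-in for HA_ERATT */
#define FLAG_HANDLED	0x2	/* hypothetical stand-in for HBA_ERATT_HANDLED */

/*
 * Illustrative sketch only: latch a hardware error event into driver
 * work/flag bitmaps under the lock.
 */
static int latch_error(spinlock_t *lock, u32 hw_status,
		       u32 *work_bits, u32 *flag_bits)
{
	if (!hw_status)
		return 0;		/* nothing to report */

	spin_lock_irq(lock);
	*work_bits |= WORK_ERATT;	/* wake-up reason for the worker */
	*flag_bits |= FLAG_HANDLED;	/* polling path owns this event */
	spin_unlock_irq(lock);
	return 1;
}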
- */ -static inline int -lpfc_intr_state_check(struct lpfc_hba *phba) -{ - /* If the pci channel is offline, ignore all the interrupts */ - if (unlikely(pci_channel_offline(phba->pcidev))) - return -EIO; - - /* Update device level interrupt statistics */ - phba->sli.slistat.sli_intr++; - - /* Ignore all interrupts during initialization. */ - if (unlikely(phba->link_state < LPFC_LINK_DOWN)) - return -EIO; - return 0; } /** - * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device + * lpfc_sp_intr_handler - The slow-path interrupt handler of lpfc driver * @irq: Interrupt number. * @dev_id: The device context pointer. * * This function is directly called from the PCI layer as an interrupt - * service routine when device with SLI-3 interface spec is enabled with - * MSI-X multi-message interrupt mode and there are slow-path events in - * the HBA. However, when the device is enabled with either MSI or Pin-IRQ - * interrupt mode, this function is called as part of the device-level - * interrupt handler. When the PCI slot is in error recovery or the HBA - * is undergoing initialization, the interrupt handler will not process - * the interrupt. The link attention and ELS ring attention events are - * handled by the worker thread. The interrupt handler signals the worker - * thread and returns for these events. This function is called without - * any lock held. It gets the hbalock to access and update SLI data + service routine when the device is enabled with MSI-X multi-message + interrupt mode and there are slow-path events in the HBA. However, + when the device is enabled with either MSI or Pin-IRQ interrupt mode, + this function is called as part of the device-level interrupt handler. + When the PCI slot is in error recovery or the HBA is undergoing + initialization, the interrupt handler will not process the interrupt. + The link attention and ELS ring attention events are handled by the + worker thread. The interrupt handler signals the worker thread + and returns for these events. This function is called without any + lock held. It gets the hbalock to access and update SLI data * structures. * * This function returns IRQ_HANDLED when interrupt is handled else it * returns IRQ_NONE. **/ irqreturn_t -lpfc_sli_sp_intr_handler(int irq, void *dev_id) +lpfc_sp_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; uint32_t ha_copy; @@ -7714,8 +5240,13 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) * individual interrupt handler in MSI-X multi-message interrupt mode */ if (phba->intr_type == MSIX) { - /* Check device state for handling interrupt */ - if (lpfc_intr_state_check(phba)) + /* If the pci channel is offline, ignore all the interrupts */ + if (unlikely(pci_channel_offline(phba->pcidev))) + return IRQ_NONE; + /* Update device-level interrupt statistics */ + phba->sli.slistat.sli_intr++; + /* Ignore all interrupts during initialization. */ + if (unlikely(phba->link_state < LPFC_LINK_DOWN)) return IRQ_NONE; /* Need to read HA REG for slow-path events */ spin_lock_irqsave(&phba->hbalock, iflag); @@ -7740,7 +5271,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) * interrupt.
*/ if (unlikely(phba->hba_flag & DEFER_ERATT)) { - spin_unlock_irqrestore(&phba->hbalock, iflag); + spin_unlock_irq(&phba->hbalock); return IRQ_NONE; } @@ -7833,7 +5364,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) { pmb = phba->sli.mbox_active; - pmbox = &pmb->u.mb; + pmbox = &pmb->mb; mbox = phba->mbox; vport = pmb->vport; @@ -7903,8 +5434,7 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) LOG_MBOX | LOG_SLI, "0350 rc should have" "been MBX_BUSY"); - if (rc != MBX_NOT_FINISHED) - goto send_current_mbox; + goto send_current_mbox; } } spin_lock_irqsave( @@ -7941,29 +5471,29 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id) } return IRQ_HANDLED; -} /* lpfc_sli_sp_intr_handler */ +} /* lpfc_sp_intr_handler */ /** - * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device. + * lpfc_fp_intr_handler - The fast-path interrupt handler of lpfc driver * @irq: Interrupt number. * @dev_id: The device context pointer. * * This function is directly called from the PCI layer as an interrupt - * service routine when device with SLI-3 interface spec is enabled with - * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB - * ring event in the HBA. However, when the device is enabled with either - * MSI or Pin-IRQ interrupt mode, this function is called as part of the - * device-level interrupt handler. When the PCI slot is in error recovery - * or the HBA is undergoing initialization, the interrupt handler will not - * process the interrupt. The SCSI FCP fast-path ring events are handled in - * the interrupt context. This function is called without any lock held. - * It gets the hbalock to access and update SLI data structures. + service routine when the device is enabled with MSI-X multi-message + interrupt mode and there is a fast-path FCP IOCB ring event in the + HBA. However, when the device is enabled with either MSI or Pin-IRQ + interrupt mode, this function is called as part of the device-level + interrupt handler. When the PCI slot is in error recovery or the HBA + is undergoing initialization, the interrupt handler will not process + the interrupt. The SCSI FCP fast-path ring events are handled in the + interrupt context. This function is called without any lock held. It + gets the hbalock to access and update SLI data structures. * * This function returns IRQ_HANDLED when interrupt is handled else it * returns IRQ_NONE. **/ irqreturn_t -lpfc_sli_fp_intr_handler(int irq, void *dev_id) +lpfc_fp_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; uint32_t ha_copy; @@ -7983,8 +5513,13 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id) * individual interrupt handler in MSI-X multi-message interrupt mode */ if (phba->intr_type == MSIX) { - /* Check device state for handling interrupt */ - if (lpfc_intr_state_check(phba)) + /* If pci channel is offline, ignore all the interrupts */ + if (unlikely(pci_channel_offline(phba->pcidev))) + return IRQ_NONE; + /* Update device-level interrupt statistics */ + phba->sli.slistat.sli_intr++; + /* Ignore all interrupts during initialization. */ + if (unlikely(phba->link_state < LPFC_LINK_DOWN)) return IRQ_NONE; /* Need to read HA REG for FCP ring and other ring events */ ha_copy = readl(phba->HAregaddr); @@ -7995,7 +5530,7 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id) * any interrupt.
*/ if (unlikely(phba->hba_flag & DEFER_ERATT)) { - spin_unlock_irqrestore(&phba->hbalock, iflag); + spin_unlock_irq(&phba->hbalock); return IRQ_NONE; } writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)), @@ -8031,27 +5566,26 @@ lpfc_sli_fp_intr_handler(int irq, void *dev_id) } } return IRQ_HANDLED; -} /* lpfc_sli_fp_intr_handler */ +} /* lpfc_fp_intr_handler */ /** - * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device + * lpfc_intr_handler - The device-level interrupt handler of lpfc driver * @irq: Interrupt number. * @dev_id: The device context pointer. * - * This function is the HBA device-level interrupt handler to device with - * SLI-3 interface spec, called from the PCI layer when either MSI or - * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which - * requires driver attention. This function invokes the slow-path interrupt - * attention handling function and fast-path interrupt attention handling - * function in turn to process the relevant HBA attention events. This - * function is called without any lock held. It gets the hbalock to access - * and update SLI data structures. + * This function is the device-level interrupt handler called from the PCI + * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is + * an event in the HBA which requires driver attention. This function + * invokes the slow-path interrupt attention handling function and fast-path + * interrupt attention handling function in turn to process the relevant + * HBA attention events. This function is called without any lock held. It + * gets the hbalock to access and update SLI data structures. * * This function returns IRQ_HANDLED when interrupt is handled, else it * returns IRQ_NONE. **/ irqreturn_t -lpfc_sli_intr_handler(int irq, void *dev_id) +lpfc_intr_handler(int irq, void *dev_id) { struct lpfc_hba *phba; irqreturn_t sp_irq_rc, fp_irq_rc; @@ -8066,8 +5600,15 @@ lpfc_sli_intr_handler(int irq, void *dev_id) if (unlikely(!phba)) return IRQ_NONE; - /* Check device state for handling interrupt */ - if (lpfc_intr_state_check(phba)) + /* If the pci channel is offline, ignore all the interrupts. */ + if (unlikely(pci_channel_offline(phba->pcidev))) + return IRQ_NONE; + + /* Update device level interrupt statistics */ + phba->sli.slistat.sli_intr++; + + /* Ignore all interrupts during initialization. */ + if (unlikely(phba->link_state < LPFC_LINK_DOWN)) return IRQ_NONE; spin_lock(&phba->hbalock); @@ -8109,7 +5650,7 @@ lpfc_sli_intr_handler(int irq, void *dev_id) status2 >>= (4*LPFC_ELS_RING); if (status1 || (status2 & HA_RXMASK)) - sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id); + sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id); else sp_irq_rc = IRQ_NONE; @@ -8129,3322 +5670,10 @@ lpfc_sli_intr_handler(int irq, void *dev_id) status2 = 0; if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK)) - fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id); + fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id); else fp_irq_rc = IRQ_NONE; /* Return device-level interrupt handling status */ return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc; -} /* lpfc_sli_intr_handler */ - -/** - * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked by the worker thread to process all the pending - * SLI4 FCP abort XRI events. 
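Editorial aside: lpfc_intr_handler() above fans out to the slow-path and fast-path sub-handlers and reports IRQ_HANDLED if either of them consumed an event. A compact sketch of that result-combining step follows; the names are hypothetical.

#include <linux/interrupt.h>

/*
 * Illustrative sketch only: a device-level handler that is "handled"
 * when either sub-handler consumed an event.
 */
static irqreturn_t combined_handler(int irq, void *dev_id,
				    irqreturn_t (*sp)(int, void *),
				    irqreturn_t (*fp)(int, void *))
{
	irqreturn_t sp_rc = sp(irq, dev_id);	/* slow-path events */
	irqreturn_t fp_rc = fp(irq, dev_id);	/* fast-path FCP events */

	return (sp_rc == IRQ_HANDLED) ? sp_rc : fp_rc;
}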
- **/ -void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba) -{ - struct lpfc_cq_event *cq_event; - - /* First, declare the fcp xri abort event has been handled */ - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~FCP_XRI_ABORT_EVENT; - spin_unlock_irq(&phba->hbalock); - /* Now, handle all the fcp xri abort events */ - while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) { - /* Get the first event from the head of the event queue */ - spin_lock_irq(&phba->hbalock); - list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue, - cq_event, struct lpfc_cq_event, list); - spin_unlock_irq(&phba->hbalock); - /* Notify aborted XRI for FCP work queue */ - lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri); - /* Free the event processed back to the free pool */ - lpfc_sli4_cq_event_release(phba, cq_event); - } -} - -/** - * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked by the worker thread to process all the pending - * SLI4 els abort xri events. - **/ -void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) -{ - struct lpfc_cq_event *cq_event; - - /* First, declare the els xri abort event has been handled */ - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~ELS_XRI_ABORT_EVENT; - spin_unlock_irq(&phba->hbalock); - /* Now, handle all the els xri abort events */ - while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) { - /* Get the first event from the head of the event queue */ - spin_lock_irq(&phba->hbalock); - list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue, - cq_event, struct lpfc_cq_event, list); - spin_unlock_irq(&phba->hbalock); - /* Notify aborted XRI for ELS work queue */ - lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri); - /* Free the event processed back to the free pool */ - lpfc_sli4_cq_event_release(phba, cq_event); - } -} - -static void -lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn, - struct lpfc_iocbq *pIocbOut, - struct lpfc_wcqe_complete *wcqe) -{ - size_t offset = offsetof(struct lpfc_iocbq, iocb); - - memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, - sizeof(struct lpfc_iocbq) - offset); - memset(&pIocbIn->sli4_info, 0, - sizeof(struct lpfc_sli4_rspiocb_info)); - /* Map WCQE parameters into irspiocb parameters */ - pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe); - if (pIocbOut->iocb_flag & LPFC_IO_FCP) - if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) - pIocbIn->iocb.un.fcpi.fcpi_parm = - pIocbOut->iocb.un.fcpi.fcpi_parm - - wcqe->total_data_placed; - else - pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; - else - pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; - /* Load in additional WCQE parameters */ - pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe); - pIocbIn->sli4_info.bfield = 0; - if (bf_get(lpfc_wcqe_c_xb, wcqe)) - pIocbIn->sli4_info.bfield |= LPFC_XB; - if (bf_get(lpfc_wcqe_c_pv, wcqe)) { - pIocbIn->sli4_info.bfield |= LPFC_PV; - pIocbIn->sli4_info.priority = - bf_get(lpfc_wcqe_c_priority, wcqe); - } -} - -/** - * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event - * @phba: Pointer to HBA context object. - * @cqe: Pointer to mailbox completion queue entry. - * - * This routine processes a mailbox completion queue entry with an - * asynchronous event. - * - * Return: true if work posted to worker thread, otherwise false.
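Editorial aside: the two xri-abort event procs above drain their queues with the same shape: dequeue one entry under hbalock, process it with the lock dropped, release it, repeat. A generic sketch of that drain loop follows; the types and names are hypothetical.

#include <linux/list.h>
#include <linux/spinlock.h>

struct event {
	struct list_head list;	/* hypothetical queued event */
};

/*
 * Illustrative sketch only: dequeue one event at a time under the
 * lock and handle it with the lock dropped.
 */
static void drain_event_queue(spinlock_t *lock, struct list_head *queue,
			      void (*handle)(struct event *ev))
{
	struct event *ev;

	for (;;) {
		spin_lock_irq(lock);
		ev = list_first_entry_or_null(queue, struct event, list);
		if (ev)
			list_del(&ev->list);
		spin_unlock_irq(lock);

		if (!ev)
			break;
		handle(ev);	/* handler may sleep or take other locks */
	}
}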
- **/ -static bool -lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) -{ - struct lpfc_cq_event *cq_event; - unsigned long iflags; - - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "0392 Async Event: word0:x%x, word1:x%x, " - "word2:x%x, word3:x%x\n", mcqe->word0, - mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer); - - /* Allocate a new internal CQ_EVENT entry */ - cq_event = lpfc_sli4_cq_event_alloc(phba); - if (!cq_event) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0394 Failed to allocate CQ_EVENT entry\n"); - return false; - } - - /* Move the CQE into an asynchronous event entry */ - memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe)); - spin_lock_irqsave(&phba->hbalock, iflags); - list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue); - /* Set the async event flag */ - phba->hba_flag |= ASYNC_EVENT; - spin_unlock_irqrestore(&phba->hbalock, iflags); - - return true; -} - -/** - * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event - * @phba: Pointer to HBA context object. - * @cqe: Pointer to mailbox completion queue entry. - * - * This routine process a mailbox completion queue entry with mailbox - * completion event. - * - * Return: true if work posted to worker thread, otherwise false. - **/ -static bool -lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) -{ - uint32_t mcqe_status; - MAILBOX_t *mbox, *pmbox; - struct lpfc_mqe *mqe; - struct lpfc_vport *vport; - struct lpfc_nodelist *ndlp; - struct lpfc_dmabuf *mp; - unsigned long iflags; - LPFC_MBOXQ_t *pmb; - bool workposted = false; - int rc; - - /* If not a mailbox complete MCQE, out by checking mailbox consume */ - if (!bf_get(lpfc_trailer_completed, mcqe)) - goto out_no_mqe_complete; - - /* Get the reference to the active mbox command */ - spin_lock_irqsave(&phba->hbalock, iflags); - pmb = phba->sli.mbox_active; - if (unlikely(!pmb)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, - "1832 No pending MBOX command to handle\n"); - spin_unlock_irqrestore(&phba->hbalock, iflags); - goto out_no_mqe_complete; - } - spin_unlock_irqrestore(&phba->hbalock, iflags); - mqe = &pmb->u.mqe; - pmbox = (MAILBOX_t *)&pmb->u.mqe; - mbox = phba->mbox; - vport = pmb->vport; - - /* Reset heartbeat timer */ - phba->last_completion_time = jiffies; - del_timer(&phba->sli.mbox_tmo); - - /* Move mbox data to caller's mailbox region, do endian swapping */ - if (pmb->mbox_cmpl && mbox) - lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); - /* Set the mailbox status with SLI4 range 0x4000 */ - mcqe_status = bf_get(lpfc_mcqe_status, mcqe); - if (mcqe_status != MB_CQE_STATUS_SUCCESS) - bf_set(lpfc_mqe_status, mqe, - (LPFC_MBX_ERROR_RANGE | mcqe_status)); - - if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { - pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; - lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, - "MBOX dflt rpi: status:x%x rpi:x%x", - mcqe_status, - pmbox->un.varWords[0], 0); - if (mcqe_status == MB_CQE_STATUS_SUCCESS) { - mp = (struct lpfc_dmabuf *)(pmb->context1); - ndlp = (struct lpfc_nodelist *)pmb->context2; - /* Reg_LOGIN of dflt RPI was successful. Now lets get - * RID of the PPI using the same mbox buffer. 
- */ - lpfc_unreg_login(phba, vport->vpi, - pmbox->un.varWords[0], pmb); - pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; - pmb->context1 = mp; - pmb->context2 = ndlp; - pmb->vport = vport; - rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); - if (rc != MBX_BUSY) - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | - LOG_SLI, "0385 rc should " - "have been MBX_BUSY\n"); - if (rc != MBX_NOT_FINISHED) - goto send_current_mbox; - } - } - spin_lock_irqsave(&phba->pport->work_port_lock, iflags); - phba->pport->work_port_events &= ~WORKER_MBOX_TMO; - spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags); - - /* There is mailbox completion work to do */ - spin_lock_irqsave(&phba->hbalock, iflags); - __lpfc_mbox_cmpl_put(phba, pmb); - phba->work_ha |= HA_MBATT; - spin_unlock_irqrestore(&phba->hbalock, iflags); - workposted = true; - -send_current_mbox: - spin_lock_irqsave(&phba->hbalock, iflags); - /* Release the mailbox command posting token */ - phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; - /* Setting active mailbox pointer need to be in sync to flag clear */ - phba->sli.mbox_active = NULL; - spin_unlock_irqrestore(&phba->hbalock, iflags); - /* Wake up worker thread to post the next pending mailbox command */ - lpfc_worker_wake_up(phba); -out_no_mqe_complete: - if (bf_get(lpfc_trailer_consumed, mcqe)) - lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); - return workposted; -} - -/** - * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry - * @phba: Pointer to HBA context object. - * @cqe: Pointer to mailbox completion queue entry. - * - * This routine process a mailbox completion queue entry, it invokes the - * proper mailbox complete handling or asynchrous event handling routine - * according to the MCQE's async bit. - * - * Return: true if work posted to worker thread, otherwise false. - **/ -static bool -lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) -{ - struct lpfc_mcqe mcqe; - bool workposted; - - /* Copy the mailbox MCQE and convert endian order as needed */ - lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe)); - - /* Invoke the proper event handling routine */ - if (!bf_get(lpfc_trailer_async, &mcqe)) - workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe); - else - workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe); - return workposted; -} - -/** - * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event - * @phba: Pointer to HBA context object. - * @wcqe: Pointer to work-queue completion queue entry. - * - * This routine handles an ELS work-queue completion event. - * - * Return: true if work posted to worker thread, otherwise false. 
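The bf_get()/bf_set() accessors used throughout these handlers come from the driver's SLI-4 hardware definitions, which are not part of this hunk: each named field supplies _SHIFT, _MASK and _WORD constants, and the macros read or write that field inside a queue-entry word. A rough standalone equivalent of the pattern, with a made-up field:

#include <stdint.h>

struct my_cqe { uint32_t word0; };

#define my_code_SHIFT	8
#define my_code_MASK	0x000000FF
#define my_code_WORD	word0

#define sk_bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#define sk_bf_set(name, ptr, val) \
	((ptr)->name##_WORD = (((val) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))

/* usage: sk_bf_set(my_code, &cqe, 4); code = sk_bf_get(my_code, &cqe); */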
- **/ -static bool -lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, - struct lpfc_wcqe_complete *wcqe) -{ - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; - struct lpfc_iocbq *cmdiocbq; - struct lpfc_iocbq *irspiocbq; - unsigned long iflags; - bool workposted = false; - - spin_lock_irqsave(&phba->hbalock, iflags); - pring->stats.iocb_event++; - /* Look up the ELS command IOCB and create pseudo response IOCB */ - cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, - bf_get(lpfc_wcqe_c_request_tag, wcqe)); - spin_unlock_irqrestore(&phba->hbalock, iflags); - - if (unlikely(!cmdiocbq)) { - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "0386 ELS complete with no corresponding " - "cmdiocb: iotag (%d)\n", - bf_get(lpfc_wcqe_c_request_tag, wcqe)); - return workposted; - } - - /* Fake the irspiocbq and copy necessary response information */ - irspiocbq = lpfc_sli_get_iocbq(phba); - if (!irspiocbq) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0387 Failed to allocate an iocbq\n"); - return workposted; - } - lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe); - - /* Add the irspiocb to the response IOCB work list */ - spin_lock_irqsave(&phba->hbalock, iflags); - list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue); - /* Indicate ELS ring attention */ - phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING)); - spin_unlock_irqrestore(&phba->hbalock, iflags); - workposted = true; - - return workposted; -} - -/** - * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event - * @phba: Pointer to HBA context object. - * @wcqe: Pointer to work-queue completion queue entry. - * - * This routine handles a slow-path WQ entry consumed event by invoking the - * proper WQ release routine to the slow-path WQ. - **/ -static void -lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba, - struct lpfc_wcqe_release *wcqe) -{ - /* Check for the slow-path ELS work queue */ - if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id) - lpfc_sli4_wq_release(phba->sli4_hba.els_wq, - bf_get(lpfc_wcqe_r_wqe_index, wcqe)); - else - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "2579 Slow-path wqe consume event carries " - "miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n", - bf_get(lpfc_wcqe_r_wqe_index, wcqe), - phba->sli4_hba.els_wq->queue_id); -} - -/** - * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event - * @phba: Pointer to HBA context object. - * @cq: Pointer to a WQ completion queue. - * @wcqe: Pointer to work-queue completion queue entry. - * - * This routine handles an XRI abort event. - * - * Return: true if work posted to worker thread, otherwise false.
- **/ -static bool -lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, - struct lpfc_queue *cq, - struct sli4_wcqe_xri_aborted *wcqe) -{ - bool workposted = false; - struct lpfc_cq_event *cq_event; - unsigned long iflags; - - /* Allocate a new internal CQ_EVENT entry */ - cq_event = lpfc_sli4_cq_event_alloc(phba); - if (!cq_event) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0602 Failed to allocate CQ_EVENT entry\n"); - return false; - } - - /* Move the CQE into the proper xri abort event list */ - memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted)); - switch (cq->subtype) { - case LPFC_FCP: - spin_lock_irqsave(&phba->hbalock, iflags); - list_add_tail(&cq_event->list, - &phba->sli4_hba.sp_fcp_xri_aborted_work_queue); - /* Set the fcp xri abort event flag */ - phba->hba_flag |= FCP_XRI_ABORT_EVENT; - spin_unlock_irqrestore(&phba->hbalock, iflags); - workposted = true; - break; - case LPFC_ELS: - spin_lock_irqsave(&phba->hbalock, iflags); - list_add_tail(&cq_event->list, - &phba->sli4_hba.sp_els_xri_aborted_work_queue); - /* Set the els xri abort event flag */ - phba->hba_flag |= ELS_XRI_ABORT_EVENT; - spin_unlock_irqrestore(&phba->hbalock, iflags); - workposted = true; - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0603 Invalid work queue CQE subtype (x%x)\n", - cq->subtype); - workposted = false; - break; - } - return workposted; -} - -/** - * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry - * @phba: Pointer to HBA context object. - * @cq: Pointer to the completion queue. - * @wcqe: Pointer to a completion queue entry. - * - * This routine processes a slow-path work-queue completion queue entry. - * - * Return: true if work posted to worker thread, otherwise false. - **/ -static bool -lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, - struct lpfc_cqe *cqe) -{ - struct lpfc_wcqe_complete wcqe; - bool workposted = false; - - /* Copy the work queue CQE and convert endian order if needed */ - lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); - - /* Check for the different types of WCQE and dispatch accordingly */ - switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { - case CQE_CODE_COMPL_WQE: - /* Process the WQ complete event */ - workposted = lpfc_sli4_sp_handle_els_wcqe(phba, - (struct lpfc_wcqe_complete *)&wcqe); - break; - case CQE_CODE_RELEASE_WQE: - /* Process the WQ release event */ - lpfc_sli4_sp_handle_rel_wcqe(phba, - (struct lpfc_wcqe_release *)&wcqe); - break; - case CQE_CODE_XRI_ABORTED: - /* Process the WQ XRI abort event */ - workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, - (struct sli4_wcqe_xri_aborted *)&wcqe); - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0388 Not a valid WCQE code: x%x\n", - bf_get(lpfc_wcqe_c_code, &wcqe)); - break; - } - return workposted; -} - -/** - * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry - * @phba: Pointer to HBA context object. - * @rcqe: Pointer to receive-queue completion queue entry. - * - * This routine processes a receive-queue completion queue entry. - * - * Return: true if work posted to worker thread, otherwise false.
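Both WCQE dispatchers in this hunk first copy the ring-owned entry into a stack-local structure (lpfc_sli_pcimem_bcopy() also performs any endian conversion) and only then decode it, so a later DMA write to the ring slot cannot change fields mid-decode. The shape, sketched with hypothetical completion codes:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

enum { SK_COMPL = 1, SK_RELEASE = 2, SK_XRI_ABORT = 4 };

struct sk_cqe { uint32_t word[4]; };

static bool sk_dispatch(const struct sk_cqe *ring_slot)
{
	struct sk_cqe snap;

	memcpy(&snap, ring_slot, sizeof(snap));	/* snapshot first */
	switch (snap.word[3] & 0xff) {		/* completion code */
	case SK_COMPL:
		return true;	/* completion handled, work posted */
	case SK_RELEASE:
		return false;	/* ring slots released, no work */
	case SK_XRI_ABORT:
		return true;	/* abort queued to the worker */
	default:
		return false;	/* unknown code: log and ignore */
	}
}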
- **/ -static bool -lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe) -{ - struct lpfc_rcqe rcqe; - bool workposted = false; - struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq; - struct lpfc_queue *drq = phba->sli4_hba.dat_rq; - struct hbq_dmabuf *dma_buf; - uint32_t status; - unsigned long iflags; - - /* Copy the receive queue CQE and convert endian order if needed */ - lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe)); - lpfc_sli4_rq_release(hrq, drq); - if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE) - goto out; - if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id) - goto out; - - status = bf_get(lpfc_rcqe_status, &rcqe); - switch (status) { - case FC_STATUS_RQ_BUF_LEN_EXCEEDED: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2537 Receive Frame Truncated!!\n"); - case FC_STATUS_RQ_SUCCESS: - spin_lock_irqsave(&phba->hbalock, iflags); - dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list); - if (!dma_buf) { - spin_unlock_irqrestore(&phba->hbalock, iflags); - goto out; - } - memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe)); - /* save off the frame for the worker thread to process */ - list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list); - /* Frame received */ - phba->hba_flag |= HBA_RECEIVE_BUFFER; - spin_unlock_irqrestore(&phba->hbalock, iflags); - workposted = true; - break; - case FC_STATUS_INSUFF_BUF_NEED_BUF: - case FC_STATUS_INSUFF_BUF_FRM_DISC: - /* Post more buffers if possible */ - spin_lock_irqsave(&phba->hbalock, iflags); - phba->hba_flag |= HBA_POST_RECEIVE_BUFFER; - spin_unlock_irqrestore(&phba->hbalock, iflags); - workposted = true; - break; - } -out: - return workposted; - -} - -/** - * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry - * @phba: Pointer to HBA context object. - * @eqe: Pointer to slow-path event queue entry. - * - * This routine processes an event queue entry from the slow-path event queue. - * It checks the MajorCode and MinorCode to determine whether this is a - * completion event on a completion queue; if not, an error is logged and the - * routine returns. Otherwise, it gets to the corresponding completion - * queue, processes all the entries on that completion queue, rearms the - * completion queue, and then returns.
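The slow-path EQE handler below resolves the CQE's resource id against the children of the slow-path EQ: every CQ created against an EQ was linked onto that EQ's child_list, so the lookup is a linear walk comparing queue_id. A sketch with a plain singly linked sibling list:

#include <stddef.h>

struct sk_queue {
	struct sk_queue *next_sibling;
	unsigned int queue_id;
};

static struct sk_queue *sk_find_child(struct sk_queue *first_child,
				      unsigned int cqid)
{
	struct sk_queue *c;

	for (c = first_child; c; c = c->next_sibling)
		if (c->queue_id == cqid)
			return c;
	return NULL;	/* caller logs "CQ identifier does not exist" */
}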
- * - **/ -static void -lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) -{ - struct lpfc_queue *cq = NULL, *childq, *speq; - struct lpfc_cqe *cqe; - bool workposted = false; - int ecount = 0; - uint16_t cqid; - - if (bf_get(lpfc_eqe_major_code, eqe) != 0 || - bf_get(lpfc_eqe_minor_code, eqe) != 0) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0359 Not a valid slow-path completion " - "event: majorcode=x%x, minorcode=x%x\n", - bf_get(lpfc_eqe_major_code, eqe), - bf_get(lpfc_eqe_minor_code, eqe)); - return; - } - - /* Get the reference to the corresponding CQ */ - cqid = bf_get(lpfc_eqe_resource_id, eqe); - - /* Search for completion queue pointer matching this cqid */ - speq = phba->sli4_hba.sp_eq; - list_for_each_entry(childq, &speq->child_list, list) { - if (childq->queue_id == cqid) { - cq = childq; - break; - } - } - if (unlikely(!cq)) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0365 Slow-path CQ identifier (%d) does " - "not exist\n", cqid); - return; - } - - /* Process all the entries to the CQ */ - switch (cq->type) { - case LPFC_MCQ: - while ((cqe = lpfc_sli4_cq_get(cq))) { - workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); - if (!(++ecount % LPFC_GET_QE_REL_INT)) - lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); - } - break; - case LPFC_WCQ: - while ((cqe = lpfc_sli4_cq_get(cq))) { - workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe); - if (!(++ecount % LPFC_GET_QE_REL_INT)) - lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); - } - break; - case LPFC_RCQ: - while ((cqe = lpfc_sli4_cq_get(cq))) { - workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe); - if (!(++ecount % LPFC_GET_QE_REL_INT)) - lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); - } - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0370 Invalid completion queue type (%d)\n", - cq->type); - return; - } - - /* Catch the no cq entry condition, log an error */ - if (unlikely(ecount == 0)) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0371 No entry from the CQ: identifier " - "(x%x), type (%d)\n", cq->queue_id, cq->type); - - /* In any case, flush and re-arm the CQ */ - lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); - - /* wake up worker thread if there is work to be done */ - if (workposted) - lpfc_worker_wake_up(phba); -} - -/** - * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry - * @phba: Pointer to HBA context object. - * @wcqe: Pointer to work-queue completion queue entry. - * - * This routine processes a fast-path work queue completion entry from the - * fast-path event queue for FCP command response completion. - **/ -static void -lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, - struct lpfc_wcqe_complete *wcqe) -{ - struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING]; - struct lpfc_iocbq *cmdiocbq; - struct lpfc_iocbq irspiocbq; - unsigned long iflags; - - spin_lock_irqsave(&phba->hbalock, iflags); - pring->stats.iocb_event++; - spin_unlock_irqrestore(&phba->hbalock, iflags); - - /* Check for response status */ - if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { - /* If resource errors reported from HBA, reduce queue - * depth of the SCSI device.
- */ - if ((bf_get(lpfc_wcqe_c_status, wcqe) == - IOSTAT_LOCAL_REJECT) && - (wcqe->parameter == IOERR_NO_RESOURCES)) { - phba->lpfc_rampdown_queue_depth(phba); - } - /* Log the error status */ - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "0373 FCP complete error: status=x%x, " - "hw_status=x%x, total_data_specified=%d, " - "parameter=x%x, word3=x%x\n", - bf_get(lpfc_wcqe_c_status, wcqe), - bf_get(lpfc_wcqe_c_hw_status, wcqe), - wcqe->total_data_placed, wcqe->parameter, - wcqe->word3); - } - - /* Look up the FCP command IOCB and create pseudo response IOCB */ - spin_lock_irqsave(&phba->hbalock, iflags); - cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring, - bf_get(lpfc_wcqe_c_request_tag, wcqe)); - spin_unlock_irqrestore(&phba->hbalock, iflags); - if (unlikely(!cmdiocbq)) { - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "0374 FCP complete with no corresponding " - "cmdiocb: iotag (%d)\n", - bf_get(lpfc_wcqe_c_request_tag, wcqe)); - return; - } - if (unlikely(!cmdiocbq->iocb_cmpl)) { - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "0375 FCP cmdiocb not callback function " - "iotag: (%d)\n", - bf_get(lpfc_wcqe_c_request_tag, wcqe)); - return; - } - - /* Fake the irspiocb and copy necessary response information */ - lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe); - - /* Pass the cmd_iocb and the rsp state to the upper layer */ - (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq); -} - -/** - * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event - * @phba: Pointer to HBA context object. - * @cq: Pointer to completion queue. - * @wcqe: Pointer to work-queue completion queue entry. - * - * This routine handles a fast-path WQ entry consumed event by invoking the - * proper WQ release routine to the fast-path WQ. - **/ -static void -lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, - struct lpfc_wcqe_release *wcqe) -{ - struct lpfc_queue *childwq; - bool wqid_matched = false; - uint16_t fcp_wqid; - - /* Check for fast-path FCP work queue release */ - fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe); - list_for_each_entry(childwq, &cq->child_list, list) { - if (childwq->queue_id == fcp_wqid) { - lpfc_sli4_wq_release(childwq, - bf_get(lpfc_wcqe_r_wqe_index, wcqe)); - wqid_matched = true; - break; - } - } - /* Report warning log message if no match found */ - if (wqid_matched != true) - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "2580 Fast-path wqe consume event carries " - "miss-matched qid: wcqe-qid=x%x\n", fcp_wqid); -} - -/** - * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry - * @phba: Pointer to HBA context object. - * @cq: Pointer to the completion queue. - * @cqe: Pointer to fast-path completion queue entry. - * - * This routine processes a fast-path work queue completion entry from the - * fast-path event queue for FCP command response completion.
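The CQ/EQ processing loops in these handlers release consumed entries back to the port in batches: every LPFC_GET_QE_REL_INT entries the host index is pushed without re-arming (LPFC_QUEUE_NOARM) so the hardware can reuse ring slots during a long burst, and one final LPFC_QUEUE_REARM release re-enables the interrupt. The cadence, sketched with stand-in helpers:

#include <stddef.h>

struct sk_q;
extern void *sk_q_get(struct sk_q *q);		/* next valid entry or NULL */
extern void sk_q_release(struct sk_q *q, int arm);
extern void sk_process(void *qe);

#define SK_REL_INTERVAL 32	/* stand-in for LPFC_GET_QE_REL_INT */

static void sk_drain(struct sk_q *q)
{
	void *qe;
	int ecount = 0;

	while ((qe = sk_q_get(q)) != NULL) {
		sk_process(qe);
		if (!(++ecount % SK_REL_INTERVAL))
			sk_q_release(q, 0);	/* NOARM: free slots only */
	}
	sk_q_release(q, 1);			/* always flush and re-arm */
}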
- **/ -static int -lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, - struct lpfc_cqe *cqe) -{ - struct lpfc_wcqe_release wcqe; - bool workposted = false; - - /* Copy the work queue CQE and convert endian order if needed */ - lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe)); - - /* Check for the different types of WCQE and dispatch accordingly */ - switch (bf_get(lpfc_wcqe_c_code, &wcqe)) { - case CQE_CODE_COMPL_WQE: - /* Process the WQ complete event */ - lpfc_sli4_fp_handle_fcp_wcqe(phba, - (struct lpfc_wcqe_complete *)&wcqe); - break; - case CQE_CODE_RELEASE_WQE: - /* Process the WQ release event */ - lpfc_sli4_fp_handle_rel_wcqe(phba, cq, - (struct lpfc_wcqe_release *)&wcqe); - break; - case CQE_CODE_XRI_ABORTED: - /* Process the WQ XRI abort event */ - workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq, - (struct sli4_wcqe_xri_aborted *)&wcqe); - break; - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0144 Not a valid WCQE code: x%x\n", - bf_get(lpfc_wcqe_c_code, &wcqe)); - break; - } - return workposted; -} - -/** - * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry - * @phba: Pointer to HBA context object. - * @eqe: Pointer to fast-path event queue entry. - * - * This routine processes an event queue entry from the fast-path event queue. - * It checks the MajorCode and MinorCode to determine whether this is a - * completion event on a completion queue; if not, an error is logged and the - * routine returns. Otherwise, it gets to the corresponding completion - * queue, processes all the entries on the completion queue, rearms the - * completion queue, and then returns. - **/ -static void -lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, - uint32_t fcp_cqidx) -{ - struct lpfc_queue *cq; - struct lpfc_cqe *cqe; - bool workposted = false; - uint16_t cqid; - int ecount = 0; - - if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) || - unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0366 Not a valid fast-path completion " - "event: majorcode=x%x, minorcode=x%x\n", - bf_get(lpfc_eqe_major_code, eqe), - bf_get(lpfc_eqe_minor_code, eqe)); - return; - } - - cq = phba->sli4_hba.fcp_cq[fcp_cqidx]; - if (unlikely(!cq)) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0367 Fast-path completion queue does not " - "exist\n"); - return; - } - - /* Get the reference to the corresponding CQ */ - cqid = bf_get(lpfc_eqe_resource_id, eqe); - if (unlikely(cqid != cq->queue_id)) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0368 Miss-matched fast-path completion " - "queue identifier: eqcqid=%d, fcpcqid=%d\n", - cqid, cq->queue_id); - return; - } - - /* Process all the entries to the CQ */ - while ((cqe = lpfc_sli4_cq_get(cq))) { - workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); - if (!(++ecount % LPFC_GET_QE_REL_INT)) - lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); - } - - /* Catch the no cq entry condition */ - if (unlikely(ecount == 0)) - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0369 No entry from fast-path completion " - "queue fcpcqid=%d\n", cq->queue_id); - - /* In any case, flush and re-arm the CQ */ - lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); - - /* wake up worker thread if there is work to be done */ - if (workposted) - lpfc_worker_wake_up(phba); -} - -static void -lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) -{ - struct lpfc_eqe *eqe; - - /* walk all the EQ entries and drop on the floor */ - while ((eqe = lpfc_sli4_eq_get(eq))) - ; - - /* Clear
and re-arm the EQ */ - lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); -} - -/** - * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device - * @irq: Interrupt number. - * @dev_id: The device context pointer. - * - * This function is directly called from the PCI layer as an interrupt - * service routine when device with SLI-4 interface spec is enabled with - * MSI-X multi-message interrupt mode and there are slow-path events in - * the HBA. However, when the device is enabled with either MSI or Pin-IRQ - * interrupt mode, this function is called as part of the device-level - * interrupt handler. When the PCI slot is in error recovery or the HBA is - * undergoing initialization, the interrupt handler will not process the - * interrupt. The link attention and ELS ring attention events are handled - * by the worker thread. The interrupt handler signals the worker thread - * and returns for these events. This function is called without any lock - * held. It gets the hbalock to access and update SLI data structures. - * - * This function returns IRQ_HANDLED when interrupt is handled else it - * returns IRQ_NONE. - **/ -irqreturn_t -lpfc_sli4_sp_intr_handler(int irq, void *dev_id) -{ - struct lpfc_hba *phba; - struct lpfc_queue *speq; - struct lpfc_eqe *eqe; - unsigned long iflag; - int ecount = 0; - - /* - * Get the driver's phba structure from the dev_id - */ - phba = (struct lpfc_hba *)dev_id; - - if (unlikely(!phba)) - return IRQ_NONE; - - /* Get to the EQ struct associated with this vector */ - speq = phba->sli4_hba.sp_eq; - - /* Check device state for handling interrupt */ - if (unlikely(lpfc_intr_state_check(phba))) { - /* Check again for link_state with lock held */ - spin_lock_irqsave(&phba->hbalock, iflag); - if (phba->link_state < LPFC_LINK_DOWN) - /* Flush, clear interrupt, and rearm the EQ */ - lpfc_sli4_eq_flush(phba, speq); - spin_unlock_irqrestore(&phba->hbalock, iflag); - return IRQ_NONE; - } - - /* - * Process all the events on the slow-path EQ - */ - while ((eqe = lpfc_sli4_eq_get(speq))) { - lpfc_sli4_sp_handle_eqe(phba, eqe); - if (!(++ecount % LPFC_GET_QE_REL_INT)) - lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM); - } - - /* Always clear and re-arm the slow-path EQ */ - lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM); - - /* Catch the no cq entry condition */ - if (unlikely(ecount == 0)) { - if (phba->intr_type == MSIX) - /* MSI-X treated interrupt served as no EQ share INT */ - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "0357 MSI-X interrupt with no EQE\n"); - else - /* Non MSI-X treated on interrupt as EQ share INT */ - return IRQ_NONE; - } - - return IRQ_HANDLED; -} /* lpfc_sli4_sp_intr_handler */ - -/** - * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device - * @irq: Interrupt number. - * @dev_id: The device context pointer. - * - * This function is directly called from the PCI layer as an interrupt - * service routine when device with SLI-4 interface spec is enabled with - * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB - * ring event in the HBA. However, when the device is enabled with either - * MSI or Pin-IRQ interrupt mode, this function is called as part of the - * device-level interrupt handler. When the PCI slot is in error recovery - * or the HBA is undergoing initialization, the interrupt handler will not - * process the interrupt. The SCSI FCP fast-path ring events are handled in - * the interrupt context. This function is called without any lock held.
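For MSI-X, each fast-path vector is registered with its own lpfc_fcp_eq_hdl, so the handler below can recover both the adapter and the EQ index from dev_id alone, while the slow-path vector passes the phba itself. The registration shape, sketched with hypothetical names:

struct sk_adapter;

struct sk_fp_handle {
	struct sk_adapter *adap;	/* back-pointer to the HBA */
	unsigned int idx;		/* fast-path EQ owned by this vector */
};

/* one handle per vector, e.g. registered in the setup path as
 * request_irq(vec[i], sk_fp_isr, 0, name, &handles[i]) */
static int sk_fp_isr(int irq, void *dev_id)
{
	struct sk_fp_handle *h = dev_id;

	(void)irq;
	if (!h || !h->adap)
		return 0;	/* IRQ_NONE */
	/* service EQ h->idx of h->adap here */
	return 1;		/* IRQ_HANDLED */
}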
- * It gets the hbalock to access and update SLI data structures. Note that, - * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is - * equal to that of FCP CQ index. - * - * This function returns IRQ_HANDLED when interrupt is handled else it - * returns IRQ_NONE. - **/ -irqreturn_t -lpfc_sli4_fp_intr_handler(int irq, void *dev_id) -{ - struct lpfc_hba *phba; - struct lpfc_fcp_eq_hdl *fcp_eq_hdl; - struct lpfc_queue *fpeq; - struct lpfc_eqe *eqe; - unsigned long iflag; - int ecount = 0; - uint32_t fcp_eqidx; - - /* Get the driver's phba structure from the dev_id */ - fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; - phba = fcp_eq_hdl->phba; - fcp_eqidx = fcp_eq_hdl->idx; - - if (unlikely(!phba)) - return IRQ_NONE; - - /* Get to the EQ struct associated with this vector */ - fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; - - /* Check device state for handling interrupt */ - if (unlikely(lpfc_intr_state_check(phba))) { - /* Check again for link_state with lock held */ - spin_lock_irqsave(&phba->hbalock, iflag); - if (phba->link_state < LPFC_LINK_DOWN) - /* Flush, clear interrupt, and rearm the EQ */ - lpfc_sli4_eq_flush(phba, fpeq); - spin_unlock_irqrestore(&phba->hbalock, iflag); - return IRQ_NONE; - } - - /* - * Process all the event on FCP fast-path EQ - */ - while ((eqe = lpfc_sli4_eq_get(fpeq))) { - lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); - if (!(++ecount % LPFC_GET_QE_REL_INT)) - lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); - } - - /* Always clear and re-arm the fast-path EQ */ - lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); - - if (unlikely(ecount == 0)) { - if (phba->intr_type == MSIX) - /* MSI-X treated interrupt served as no EQ share INT */ - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "0358 MSI-X interrupt with no EQE\n"); - else - /* Non MSI-X treated on interrupt as EQ share INT */ - return IRQ_NONE; - } - - return IRQ_HANDLED; -} /* lpfc_sli4_fp_intr_handler */ - -/** - * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device - * @irq: Interrupt number. - * @dev_id: The device context pointer. - * - * This function is the device-level interrupt handler to device with SLI-4 - * interface spec, called from the PCI layer when either MSI or Pin-IRQ - * interrupt mode is enabled and there is an event in the HBA which requires - * driver attention. This function invokes the slow-path interrupt attention - * handling function and fast-path interrupt attention handling function in - * turn to process the relevant HBA attention events. This function is called - * without any lock held. It gets the hbalock to access and update SLI data - * structures. - * - * This function returns IRQ_HANDLED when interrupt is handled, else it - * returns IRQ_NONE. - **/ -irqreturn_t -lpfc_sli4_intr_handler(int irq, void *dev_id) -{ - struct lpfc_hba *phba; - irqreturn_t sp_irq_rc, fp_irq_rc; - bool fp_handled = false; - uint32_t fcp_eqidx; - - /* Get the driver's phba structure from the dev_id */ - phba = (struct lpfc_hba *)dev_id; - - if (unlikely(!phba)) - return IRQ_NONE; - - /* - * Invokes slow-path host attention interrupt handling as appropriate. - */ - sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id); - - /* - * Invoke fast-path host attention interrupt handling as appropriate. - */ - for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { - fp_irq_rc = lpfc_sli4_fp_intr_handler(irq, - &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]); - if (fp_irq_rc == IRQ_HANDLED) - fp_handled |= true; - } - - return (fp_handled == true) ? 
IRQ_HANDLED : sp_irq_rc; -} /* lpfc_sli4_intr_handler */ - -/** - * lpfc_sli4_queue_free - free a queue structure and associated memory - * @queue: The queue structure to free. - * - * This function frees a queue structure and the DMAable memory used for - * the host resident queue. This function must be called after destroying the - * queue on the HBA. - **/ -void -lpfc_sli4_queue_free(struct lpfc_queue *queue) -{ - struct lpfc_dmabuf *dmabuf; - - if (!queue) - return; - - while (!list_empty(&queue->page_list)) { - list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf, - list); - dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE, - dmabuf->virt, dmabuf->phys); - kfree(dmabuf); - } - kfree(queue); - return; -} - -/** - * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure - * @phba: The HBA that this queue is being created on. - * @entry_size: The size of each queue entry for this queue. - * @entry_count: The number of entries that this queue will handle. - * - * This function allocates a queue structure and the DMAable memory used for - * the host resident queue. This function must be called before creating the - * queue on the HBA. - **/ -struct lpfc_queue * -lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, - uint32_t entry_count) -{ - struct lpfc_queue *queue; - struct lpfc_dmabuf *dmabuf; - int x, total_qe_count; - void *dma_pointer; - - - queue = kzalloc(sizeof(struct lpfc_queue) + - (sizeof(union sli4_qe) * entry_count), GFP_KERNEL); - if (!queue) - return NULL; - queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE; - INIT_LIST_HEAD(&queue->list); - INIT_LIST_HEAD(&queue->page_list); - INIT_LIST_HEAD(&queue->child_list); - for (x = 0, total_qe_count = 0; x < queue->page_count; x++) { - dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); - if (!dmabuf) - goto out_fail; - dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, - PAGE_SIZE, &dmabuf->phys, - GFP_KERNEL); - if (!dmabuf->virt) { - kfree(dmabuf); - goto out_fail; - } - dmabuf->buffer_tag = x; - list_add_tail(&dmabuf->list, &queue->page_list); - /* initialize queue's entry array */ - dma_pointer = dmabuf->virt; - for (; total_qe_count < entry_count && - dma_pointer < (PAGE_SIZE + dmabuf->virt); - total_qe_count++, dma_pointer += entry_size) { - queue->qe[total_qe_count].address = dma_pointer; - } - } - queue->entry_size = entry_size; - queue->entry_count = entry_count; - queue->phba = phba; - - return queue; -out_fail: - lpfc_sli4_queue_free(queue); - return NULL; -} - -/** - * lpfc_eq_create - Create an Event Queue on the HBA - * @phba: HBA structure that indicates port to create a queue on. - * @eq: The queue structure to use to create the event queue. - * @imax: The maximum interrupt per second limit. - * - * This function creates an event queue, as detailed in @eq, on a port, - * described by @phba by sending an EQ_CREATE mailbox command to the HBA. - * - * The @phba struct is used to send mailbox command to HBA. The @eq struct - * is used to get the entry count and entry size that are necessary to - * determine the number of pages to allocate and use for this queue. This - * function will send the EQ_CREATE mailbox command to the HBA to setup the - * event queue. This function is asynchronous and will wait for the mailbox - * command to finish before continuing. - * - * On success this function will return a zero. If unable to allocate enough - * memory this function will return ENOMEM.
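lpfc_sli4_queue_alloc() above sizes the ring as whole DMA pages, page_count = PAGE_ALIGN(entry_size * entry_count) / PAGE_SIZE, and then records an address for each entry while walking the pages. A userspace sketch of the same arithmetic, with malloc() standing in for dma_alloc_coherent():

#include <stdlib.h>

#define SK_PAGE_SIZE 4096UL
#define SK_PAGE_ALIGN(x) (((x) + SK_PAGE_SIZE - 1) & ~(SK_PAGE_SIZE - 1))

static int sk_carve(size_t entry_size, size_t entry_count, void **qe)
{
	size_t pages = SK_PAGE_ALIGN(entry_size * entry_count) / SK_PAGE_SIZE;
	size_t i, n = 0;

	for (i = 0; i < pages; i++) {
		char *page = malloc(SK_PAGE_SIZE);	/* one DMA page */
		char *p;

		if (!page)
			return -1;	/* sketch only: leaks earlier pages */
		for (p = page; n < entry_count &&
		     p + entry_size <= page + SK_PAGE_SIZE; p += entry_size)
			qe[n++] = p;	/* entry n lives at this address */
	}
	return 0;
}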
If the queue create mailbox command - * fails this function will return ENXIO. - **/ -uint32_t -lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax) -{ - struct lpfc_mbx_eq_create *eq_create; - LPFC_MBOXQ_t *mbox; - int rc, length, status = 0; - struct lpfc_dmabuf *dmabuf; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - uint16_t dmult; - - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - return -ENOMEM; - length = (sizeof(struct lpfc_mbx_eq_create) - - sizeof(struct lpfc_sli4_cfg_mhdr)); - lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, - LPFC_MBOX_OPCODE_EQ_CREATE, - length, LPFC_SLI4_MBX_EMBED); - eq_create = &mbox->u.mqe.un.eq_create; - bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request, - eq->page_count); - bf_set(lpfc_eq_context_size, &eq_create->u.request.context, - LPFC_EQE_SIZE); - bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1); - /* Calculate delay multiper from maximum interrupt per second */ - dmult = LPFC_DMULT_CONST/imax - 1; - bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context, - dmult); - switch (eq->entry_count) { - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0360 Unsupported EQ count. (%d)\n", - eq->entry_count); - if (eq->entry_count < 256) - return -EINVAL; - /* otherwise default to smallest count (drop through) */ - case 256: - bf_set(lpfc_eq_context_count, &eq_create->u.request.context, - LPFC_EQ_CNT_256); - break; - case 512: - bf_set(lpfc_eq_context_count, &eq_create->u.request.context, - LPFC_EQ_CNT_512); - break; - case 1024: - bf_set(lpfc_eq_context_count, &eq_create->u.request.context, - LPFC_EQ_CNT_1024); - break; - case 2048: - bf_set(lpfc_eq_context_count, &eq_create->u.request.context, - LPFC_EQ_CNT_2048); - break; - case 4096: - bf_set(lpfc_eq_context_count, &eq_create->u.request.context, - LPFC_EQ_CNT_4096); - break; - } - list_for_each_entry(dmabuf, &eq->page_list, list) { - eq_create->u.request.page[dmabuf->buffer_tag].addr_lo = - putPaddrLow(dmabuf->phys); - eq_create->u.request.page[dmabuf->buffer_tag].addr_hi = - putPaddrHigh(dmabuf->phys); - } - mbox->vport = phba->pport; - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - mbox->context1 = NULL; - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2500 EQ_CREATE mailbox failed with " - "status x%x add_status x%x, mbx status x%x\n", - shdr_status, shdr_add_status, rc); - status = -ENXIO; - } - eq->type = LPFC_EQ; - eq->subtype = LPFC_NONE; - eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response); - if (eq->queue_id == 0xFFFF) - status = -ENXIO; - eq->host_index = 0; - eq->hba_index = 0; - - if (rc != MBX_TIMEOUT) - mempool_free(mbox, phba->mbox_mem_pool); - return status; -} - -/** - * lpfc_cq_create - Create a Completion Queue on the HBA - * @phba: HBA structure that indicates port to create a queue on. - * @cq: The queue structure to use to create the completion queue. - * @eq: The event queue to bind this completion queue to. - * - * This function creates a completion queue, as detailed in @wq, on a port, - * described by @phba by sending a CQ_CREATE mailbox command to the HBA. - * - * The @phba struct is used to send mailbox command to HBA. 
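lpfc_eq_create() above folds the caller's interrupt-rate cap into the EQ context as a delay multiplier, dmult = LPFC_DMULT_CONST / imax - 1, so a larger @imax yields a smaller coalescing delay. A sketch of the arithmetic (the constant's real value lives in the SLI-4 headers; the one used here is only a placeholder):

#include <stdint.h>

#define SK_DMULT_CONST 0xFFF0U	/* placeholder, not the driver's value */

static uint16_t sk_eq_delay_multiplier(uint16_t imax)
{
	/* caller guarantees imax != 0; higher rate, smaller multiplier */
	return (uint16_t)(SK_DMULT_CONST / imax - 1U);
}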
The @cq struct - * is used to get the entry count and entry size that are necessary to - * determine the number of pages to allocate and use for this queue. The @eq - * is used to indicate which event queue to bind this completion queue to. This - * function will send the CQ_CREATE mailbox command to the HBA to setup the - * completion queue. This function is asynchronous and will wait for the mailbox - * command to finish before continuing. - * - * On success this function will return a zero. If unable to allocate enough - * memory this function will return ENOMEM. If the queue create mailbox command - * fails this function will return ENXIO. - **/ -uint32_t -lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, - struct lpfc_queue *eq, uint32_t type, uint32_t subtype) -{ - struct lpfc_mbx_cq_create *cq_create; - struct lpfc_dmabuf *dmabuf; - LPFC_MBOXQ_t *mbox; - int rc, length, status = 0; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - return -ENOMEM; - length = (sizeof(struct lpfc_mbx_cq_create) - - sizeof(struct lpfc_sli4_cfg_mhdr)); - lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, - LPFC_MBOX_OPCODE_CQ_CREATE, - length, LPFC_SLI4_MBX_EMBED); - cq_create = &mbox->u.mqe.un.cq_create; - bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request, - cq->page_count); - bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1); - bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1); - bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id); - switch (cq->entry_count) { - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0361 Unsupported CQ count. (%d)\n", - cq->entry_count); - if (cq->entry_count < 256) - return -EINVAL; - /* otherwise default to smallest count (drop through) */ - case 256: - bf_set(lpfc_cq_context_count, &cq_create->u.request.context, - LPFC_CQ_CNT_256); - break; - case 512: - bf_set(lpfc_cq_context_count, &cq_create->u.request.context, - LPFC_CQ_CNT_512); - break; - case 1024: - bf_set(lpfc_cq_context_count, &cq_create->u.request.context, - LPFC_CQ_CNT_1024); - break; - } - list_for_each_entry(dmabuf, &cq->page_list, list) { - cq_create->u.request.page[dmabuf->buffer_tag].addr_lo = - putPaddrLow(dmabuf->phys); - cq_create->u.request.page[dmabuf->buffer_tag].addr_hi = - putPaddrHigh(dmabuf->phys); - } - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - - /* The IOCTL status is embedded in the mailbox subheader. 
*/ - shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2501 CQ_CREATE mailbox failed with " - "status x%x add_status x%x, mbx status x%x\n", - shdr_status, shdr_add_status, rc); - status = -ENXIO; - goto out; - } - cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); - if (cq->queue_id == 0xFFFF) { - status = -ENXIO; - goto out; - } - /* link the cq onto the parent eq child list */ - list_add_tail(&cq->list, &eq->child_list); - /* Set up completion queue's type and subtype */ - cq->type = type; - cq->subtype = subtype; - cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response); - cq->host_index = 0; - cq->hba_index = 0; -out: - - if (rc != MBX_TIMEOUT) - mempool_free(mbox, phba->mbox_mem_pool); - return status; -} - -/** - * lpfc_mq_create - Create a mailbox Queue on the HBA - * @phba: HBA structure that indicates port to create a queue on. - * @mq: The queue structure to use to create the mailbox queue. - * - * This function creates a mailbox queue, as detailed in @mq, on a port, - * described by @phba by sending a MQ_CREATE mailbox command to the HBA. - * - * The @phba struct is used to send mailbox command to HBA. The @cq struct - * is used to get the entry count and entry size that are necessary to - * determine the number of pages to allocate and use for this queue. This - * function will send the MQ_CREATE mailbox command to the HBA to setup the - * mailbox queue. This function is asynchronous and will wait for the mailbox - * command to finish before continuing. - * - * On success this function will return a zero. If unable to allocate enough - * memory this function will return ENOMEM. If the queue create mailbox command - * fails this function will return ENXIO. - **/ -uint32_t -lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, - struct lpfc_queue *cq, uint32_t subtype) -{ - struct lpfc_mbx_mq_create *mq_create; - struct lpfc_dmabuf *dmabuf; - LPFC_MBOXQ_t *mbox; - int rc, length, status = 0; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - return -ENOMEM; - length = (sizeof(struct lpfc_mbx_mq_create) - - sizeof(struct lpfc_sli4_cfg_mhdr)); - lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, - LPFC_MBOX_OPCODE_MQ_CREATE, - length, LPFC_SLI4_MBX_EMBED); - mq_create = &mbox->u.mqe.un.mq_create; - bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request, - mq->page_count); - bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context, - cq->queue_id); - bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1); - switch (mq->entry_count) { - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0362 Unsupported MQ count. 
(%d)\n", - mq->entry_count); - if (mq->entry_count < 16) - return -EINVAL; - /* otherwise default to smallest count (drop through) */ - case 16: - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, - LPFC_MQ_CNT_16); - break; - case 32: - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, - LPFC_MQ_CNT_32); - break; - case 64: - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, - LPFC_MQ_CNT_64); - break; - case 128: - bf_set(lpfc_mq_context_count, &mq_create->u.request.context, - LPFC_MQ_CNT_128); - break; - } - list_for_each_entry(dmabuf, &mq->page_list, list) { - mq_create->u.request.page[dmabuf->buffer_tag].addr_lo = - putPaddrLow(dmabuf->phys); - mq_create->u.request.page[dmabuf->buffer_tag].addr_hi = - putPaddrHigh(dmabuf->phys); - } - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - /* The IOCTL status is embedded in the mailbox subheader. */ - shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2502 MQ_CREATE mailbox failed with " - "status x%x add_status x%x, mbx status x%x\n", - shdr_status, shdr_add_status, rc); - status = -ENXIO; - goto out; - } - mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response); - if (mq->queue_id == 0xFFFF) { - status = -ENXIO; - goto out; - } - mq->type = LPFC_MQ; - mq->subtype = subtype; - mq->host_index = 0; - mq->hba_index = 0; - - /* link the mq onto the parent cq child list */ - list_add_tail(&mq->list, &cq->child_list); -out: - if (rc != MBX_TIMEOUT) - mempool_free(mbox, phba->mbox_mem_pool); - return status; -} - -/** - * lpfc_wq_create - Create a Work Queue on the HBA - * @phba: HBA structure that indicates port to create a queue on. - * @wq: The queue structure to use to create the work queue. - * @cq: The completion queue to bind this work queue to. - * @subtype: The subtype of the work queue indicating its functionality. - * - * This function creates a work queue, as detailed in @wq, on a port, described - * by @phba by sending a WQ_CREATE mailbox command to the HBA. - * - * The @phba struct is used to send mailbox command to HBA. The @wq struct - * is used to get the entry count and entry size that are necessary to - * determine the number of pages to allocate and use for this queue. The @cq - * is used to indicate which completion queue to bind this work queue to. This - * function will send the WQ_CREATE mailbox command to the HBA to setup the - * work queue. This function is asynchronous and will wait for the mailbox - * command to finish before continuing. - * - * On success this function will return a zero. If unable to allocate enough - * memory this function will return ENOMEM. If the queue create mailbox command - * fails this function will return ENXIO. 
- **/ -uint32_t -lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, - struct lpfc_queue *cq, uint32_t subtype) -{ - struct lpfc_mbx_wq_create *wq_create; - struct lpfc_dmabuf *dmabuf; - LPFC_MBOXQ_t *mbox; - int rc, length, status = 0; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - return -ENOMEM; - length = (sizeof(struct lpfc_mbx_wq_create) - - sizeof(struct lpfc_sli4_cfg_mhdr)); - lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_WQ_CREATE, - length, LPFC_SLI4_MBX_EMBED); - wq_create = &mbox->u.mqe.un.wq_create; - bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request, - wq->page_count); - bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request, - cq->queue_id); - list_for_each_entry(dmabuf, &wq->page_list, list) { - wq_create->u.request.page[dmabuf->buffer_tag].addr_lo = - putPaddrLow(dmabuf->phys); - wq_create->u.request.page[dmabuf->buffer_tag].addr_hi = - putPaddrHigh(dmabuf->phys); - } - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - /* The IOCTL status is embedded in the mailbox subheader. */ - shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2503 WQ_CREATE mailbox failed with " - "status x%x add_status x%x, mbx status x%x\n", - shdr_status, shdr_add_status, rc); - status = -ENXIO; - goto out; - } - wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response); - if (wq->queue_id == 0xFFFF) { - status = -ENXIO; - goto out; - } - wq->type = LPFC_WQ; - wq->subtype = subtype; - wq->host_index = 0; - wq->hba_index = 0; - - /* link the wq onto the parent cq child list */ - list_add_tail(&wq->list, &cq->child_list); -out: - if (rc == MBX_TIMEOUT) - mempool_free(mbox, phba->mbox_mem_pool); - return status; -} - -/** - * lpfc_rq_create - Create a Receive Queue on the HBA - * @phba: HBA structure that indicates port to create a queue on. - * @hrq: The queue structure to use to create the header receive queue. - * @drq: The queue structure to use to create the data receive queue. - * @cq: The completion queue to bind this work queue to. - * - * This function creates a receive buffer queue pair , as detailed in @hrq and - * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command - * to the HBA. - * - * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq - * struct is used to get the entry count that is necessary to determine the - * number of pages to use for this queue. The @cq is used to indicate which - * completion queue to bind received buffers that are posted to these queues to. - * This function will send the RQ_CREATE mailbox command to the HBA to setup the - * receive queue pair. This function is asynchronous and will wait for the - * mailbox command to finish before continuing. - * - * On success this function will return a zero. If unable to allocate enough - * memory this function will return ENOMEM. If the queue create mailbox command - * fails this function will return ENXIO. 
- **/ -uint32_t -lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, - struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) -{ - struct lpfc_mbx_rq_create *rq_create; - struct lpfc_dmabuf *dmabuf; - LPFC_MBOXQ_t *mbox; - int rc, length, status = 0; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - - if (hrq->entry_count != drq->entry_count) - return -EINVAL; - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - return -ENOMEM; - length = (sizeof(struct lpfc_mbx_rq_create) - - sizeof(struct lpfc_sli4_cfg_mhdr)); - lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, - length, LPFC_SLI4_MBX_EMBED); - rq_create = &mbox->u.mqe.un.rq_create; - switch (hrq->entry_count) { - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2535 Unsupported RQ count. (%d)\n", - hrq->entry_count); - if (hrq->entry_count < 512) - return -EINVAL; - /* otherwise default to smallest count (drop through) */ - case 512: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_512); - break; - case 1024: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_1024); - break; - case 2048: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_2048); - break; - case 4096: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_4096); - break; - } - bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, - cq->queue_id); - bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, - hrq->page_count); - bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, - LPFC_HDR_BUF_SIZE); - list_for_each_entry(dmabuf, &hrq->page_list, list) { - rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = - putPaddrLow(dmabuf->phys); - rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = - putPaddrHigh(dmabuf->phys); - } - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - /* The IOCTL status is embedded in the mailbox subheader. */ - shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2504 RQ_CREATE mailbox failed with " - "status x%x add_status x%x, mbx status x%x\n", - shdr_status, shdr_add_status, rc); - status = -ENXIO; - goto out; - } - hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); - if (hrq->queue_id == 0xFFFF) { - status = -ENXIO; - goto out; - } - hrq->type = LPFC_HRQ; - hrq->subtype = subtype; - hrq->host_index = 0; - hrq->hba_index = 0; - - /* now create the data queue */ - lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, - length, LPFC_SLI4_MBX_EMBED); - switch (drq->entry_count) { - default: - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2536 Unsupported RQ count. 
(%d)\n", - drq->entry_count); - if (drq->entry_count < 512) - return -EINVAL; - /* otherwise default to smallest count (drop through) */ - case 512: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_512); - break; - case 1024: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_1024); - break; - case 2048: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_2048); - break; - case 4096: - bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context, - LPFC_RQ_RING_SIZE_4096); - break; - } - bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context, - cq->queue_id); - bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request, - drq->page_count); - bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context, - LPFC_DATA_BUF_SIZE); - list_for_each_entry(dmabuf, &drq->page_list, list) { - rq_create->u.request.page[dmabuf->buffer_tag].addr_lo = - putPaddrLow(dmabuf->phys); - rq_create->u.request.page[dmabuf->buffer_tag].addr_hi = - putPaddrHigh(dmabuf->phys); - } - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - /* The IOCTL status is embedded in the mailbox subheader. */ - shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (shdr_status || shdr_add_status || rc) { - status = -ENXIO; - goto out; - } - drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response); - if (drq->queue_id == 0xFFFF) { - status = -ENXIO; - goto out; - } - drq->type = LPFC_DRQ; - drq->subtype = subtype; - drq->host_index = 0; - drq->hba_index = 0; - - /* link the header and data RQs onto the parent cq child list */ - list_add_tail(&hrq->list, &cq->child_list); - list_add_tail(&drq->list, &cq->child_list); - -out: - if (rc != MBX_TIMEOUT) - mempool_free(mbox, phba->mbox_mem_pool); - return status; -} - -/** - * lpfc_eq_destroy - Destroy an event Queue on the HBA - * @eq: The queue structure associated with the queue to destroy. - * - * This function destroys a queue, as detailed in @eq by sending an mailbox - * command, specific to the type of queue, to the HBA. - * - * The @eq struct is used to get the queue ID of the queue to destroy. - * - * On success this function will return a zero. If the queue destroy mailbox - * command fails this function will return ENXIO. - **/ -uint32_t -lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) -{ - LPFC_MBOXQ_t *mbox; - int rc, length, status = 0; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - - if (!eq) - return -ENODEV; - mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - return -ENOMEM; - length = (sizeof(struct lpfc_mbx_eq_destroy) - - sizeof(struct lpfc_sli4_cfg_mhdr)); - lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, - LPFC_MBOX_OPCODE_EQ_DESTROY, - length, LPFC_SLI4_MBX_EMBED); - bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request, - eq->queue_id); - mbox->vport = eq->phba->pport; - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - - rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL); - /* The IOCTL status is embedded in the mailbox subheader. 
*/ - shdr = (union lpfc_sli4_cfg_shdr *) - &mbox->u.mqe.un.eq_destroy.header.cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2505 EQ_DESTROY mailbox failed with " - "status x%x add_status x%x, mbx status x%x\n", - shdr_status, shdr_add_status, rc); - status = -ENXIO; - } - - /* Remove eq from any list */ - list_del_init(&eq->list); - if (rc != MBX_TIMEOUT) - mempool_free(mbox, eq->phba->mbox_mem_pool); - return status; -} - -/** - * lpfc_cq_destroy - Destroy a Completion Queue on the HBA - * @cq: The queue structure associated with the queue to destroy. - * - * This function destroys a queue, as detailed in @cq by sending an mailbox - * command, specific to the type of queue, to the HBA. - * - * The @cq struct is used to get the queue ID of the queue to destroy. - * - * On success this function will return a zero. If the queue destroy mailbox - * command fails this function will return ENXIO. - **/ -uint32_t -lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) -{ - LPFC_MBOXQ_t *mbox; - int rc, length, status = 0; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - - if (!cq) - return -ENODEV; - mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - return -ENOMEM; - length = (sizeof(struct lpfc_mbx_cq_destroy) - - sizeof(struct lpfc_sli4_cfg_mhdr)); - lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, - LPFC_MBOX_OPCODE_CQ_DESTROY, - length, LPFC_SLI4_MBX_EMBED); - bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request, - cq->queue_id); - mbox->vport = cq->phba->pport; - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL); - /* The IOCTL status is embedded in the mailbox subheader. */ - shdr = (union lpfc_sli4_cfg_shdr *) - &mbox->u.mqe.un.wq_create.header.cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2506 CQ_DESTROY mailbox failed with " - "status x%x add_status x%x, mbx status x%x\n", - shdr_status, shdr_add_status, rc); - status = -ENXIO; - } - /* Remove cq from any list */ - list_del_init(&cq->list); - if (rc != MBX_TIMEOUT) - mempool_free(mbox, cq->phba->mbox_mem_pool); - return status; -} - -/** - * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA - * @qm: The queue structure associated with the queue to destroy. - * - * This function destroys a queue, as detailed in @mq by sending an mailbox - * command, specific to the type of queue, to the HBA. - * - * The @mq struct is used to get the queue ID of the queue to destroy. - * - * On success this function will return a zero. If the queue destroy mailbox - * command fails this function will return ENXIO. 
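Every mailbox issued in this hunk is checked the same way: the return code from lpfc_sli_issue_mbox() and the two status words embedded in the response's config sub-header must all be zero, or the call fails with -ENXIO. The check, sketched with an illustrative response layout:

#include <stdint.h>

struct sk_shdr_resp {
	uint32_t status;	/* primary completion status */
	uint32_t add_status;	/* additional status detail */
};

static int sk_mbx_ok(int rc, const struct sk_shdr_resp *resp)
{
	/* all three must be clear for the command to count as done */
	return rc == 0 && resp->status == 0 && resp->add_status == 0;
}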
- **/ -uint32_t -lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) -{ - LPFC_MBOXQ_t *mbox; - int rc, length, status = 0; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - - if (!mq) - return -ENODEV; - mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - return -ENOMEM; - length = (sizeof(struct lpfc_mbx_mq_destroy) - - sizeof(struct lpfc_sli4_cfg_mhdr)); - lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, - LPFC_MBOX_OPCODE_MQ_DESTROY, - length, LPFC_SLI4_MBX_EMBED); - bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request, - mq->queue_id); - mbox->vport = mq->phba->pport; - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL); - /* The IOCTL status is embedded in the mailbox subheader. */ - shdr = (union lpfc_sli4_cfg_shdr *) - &mbox->u.mqe.un.mq_destroy.header.cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2507 MQ_DESTROY mailbox failed with " - "status x%x add_status x%x, mbx status x%x\n", - shdr_status, shdr_add_status, rc); - status = -ENXIO; - } - /* Remove mq from any list */ - list_del_init(&mq->list); - if (rc != MBX_TIMEOUT) - mempool_free(mbox, mq->phba->mbox_mem_pool); - return status; -} - -/** - * lpfc_wq_destroy - Destroy a Work Queue on the HBA - * @wq: The queue structure associated with the queue to destroy. - * - * This function destroys a queue, as detailed in @wq by sending an mailbox - * command, specific to the type of queue, to the HBA. - * - * The @wq struct is used to get the queue ID of the queue to destroy. - * - * On success this function will return a zero. If the queue destroy mailbox - * command fails this function will return ENXIO. - **/ -uint32_t -lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) -{ - LPFC_MBOXQ_t *mbox; - int rc, length, status = 0; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - - if (!wq) - return -ENODEV; - mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - return -ENOMEM; - length = (sizeof(struct lpfc_mbx_wq_destroy) - - sizeof(struct lpfc_sli4_cfg_mhdr)); - lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY, - length, LPFC_SLI4_MBX_EMBED); - bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request, - wq->queue_id); - mbox->vport = wq->phba->pport; - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); - shdr = (union lpfc_sli4_cfg_shdr *) - &mbox->u.mqe.un.wq_destroy.header.cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2508 WQ_DESTROY mailbox failed with " - "status x%x add_status x%x, mbx status x%x\n", - shdr_status, shdr_add_status, rc); - status = -ENXIO; - } - /* Remove wq from any list */ - list_del_init(&wq->list); - if (rc != MBX_TIMEOUT) - mempool_free(mbox, wq->phba->mbox_mem_pool); - return status; -} - -/** - * lpfc_rq_destroy - Destroy a Receive Queue on the HBA - * @rq: The queue structure associated with the queue to destroy. - * - * This function destroys a queue, as detailed in @rq by sending an mailbox - * command, specific to the type of queue, to the HBA. 
- * - * The @rq struct is used to get the queue ID of the queue to destroy. - * - * On success this function will return a zero. If the queue destroy mailbox - * command fails this function will return ENXIO. - **/ -uint32_t -lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, - struct lpfc_queue *drq) -{ - LPFC_MBOXQ_t *mbox; - int rc, length, status = 0; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - - if (!hrq || !drq) - return -ENODEV; - mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - return -ENOMEM; - length = (sizeof(struct lpfc_mbx_rq_destroy) - - sizeof(struct mbox_header)); - lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY, - length, LPFC_SLI4_MBX_EMBED); - bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, - hrq->queue_id); - mbox->vport = hrq->phba->pport; - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL); - /* The IOCTL status is embedded in the mailbox subheader. */ - shdr = (union lpfc_sli4_cfg_shdr *) - &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2509 RQ_DESTROY mailbox failed with " - "status x%x add_status x%x, mbx status x%x\n", - shdr_status, shdr_add_status, rc); - if (rc != MBX_TIMEOUT) - mempool_free(mbox, hrq->phba->mbox_mem_pool); - return -ENXIO; - } - bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request, - drq->queue_id); - rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL); - shdr = (union lpfc_sli4_cfg_shdr *) - &mbox->u.mqe.un.rq_destroy.header.cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2510 RQ_DESTROY mailbox failed with " - "status x%x add_status x%x, mbx status x%x\n", - shdr_status, shdr_add_status, rc); - status = -ENXIO; - } - list_del_init(&hrq->list); - list_del_init(&drq->list); - if (rc != MBX_TIMEOUT) - mempool_free(mbox, hrq->phba->mbox_mem_pool); - return status; -} - -/** - * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA - * @phba: The virtual port for which this call being executed. - * @pdma_phys_addr0: Physical address of the 1st SGL page. - * @pdma_phys_addr1: Physical address of the 2nd SGL page. - * @xritag: the xritag that ties this io to the SGL pages. - * - * This routine will post the sgl pages for the IO that has the xritag - * that is in the iocbq structure. The xritag is assigned during iocbq - * creation and persists for as long as the driver is loaded. - * if the caller has fewer than 256 scatter gather segments to map then - * pdma_phys_addr1 should be 0. - * If the caller needs to map more than 256 scatter gather segment then - * pdma_phys_addr1 should be a valid physical address. - * physical address for SGLs must be 64 byte aligned. - * If you are going to map 2 SGL's then the first one must have 256 entries - * the second sgl can have between 1 and 256 entries. 
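The page rules above reduce to one decision for the caller: with 256 or fewer scatter gather entries the second page address is 0, otherwise it points at a real page. A small sketch, assuming the second page sits contiguously behind the first (as the SCSI SGL path later in this file arranges); the names and the 4KB page size are illustrative.

#include <stdint.h>
#include <stdio.h>

#define SGL_ENTRIES_PER_PAGE	256	/* per the comment above */
#define SGL_PAGE_BYTES		4096	/* illustrative page size */

typedef uint64_t dma_addr_t;

/* Decide what to pass as the second SGL page address. */
static dma_addr_t second_sgl_page(dma_addr_t page0, unsigned int sge_count)
{
	if (sge_count <= SGL_ENTRIES_PER_PAGE)
		return 0;			/* no second page needed */
	return page0 + SGL_PAGE_BYTES;		/* must stay 64-byte aligned */
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)second_sgl_page(0x10000, 100));
	printf("%#llx\n", (unsigned long long)second_sgl_page(0x10000, 300));
	return 0;
}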
- * - * Return codes: - * 0 - Success - * -ENXIO, -ENOMEM - Failure - **/ -int -lpfc_sli4_post_sgl(struct lpfc_hba *phba, - dma_addr_t pdma_phys_addr0, - dma_addr_t pdma_phys_addr1, - uint16_t xritag) -{ - struct lpfc_mbx_post_sgl_pages *post_sgl_pages; - LPFC_MBOXQ_t *mbox; - int rc; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - - if (xritag == NO_XRI) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "0364 Invalid param:\n"); - return -EINVAL; - } - - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - return -ENOMEM; - - lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, - sizeof(struct lpfc_mbx_post_sgl_pages) - - sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED); - - post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *) - &mbox->u.mqe.un.post_sgl_pages; - bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag); - bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1); - - post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo = - cpu_to_le32(putPaddrLow(pdma_phys_addr0)); - post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi = - cpu_to_le32(putPaddrHigh(pdma_phys_addr0)); - - post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo = - cpu_to_le32(putPaddrLow(pdma_phys_addr1)); - post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi = - cpu_to_le32(putPaddrHigh(pdma_phys_addr1)); - if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - else - rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); - /* The IOCTL status is embedded in the mailbox subheader. */ - shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (rc != MBX_TIMEOUT) - mempool_free(mbox, phba->mbox_mem_pool); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2511 POST_SGL mailbox failed with " - "status x%x add_status x%x, mbx status x%x\n", - shdr_status, shdr_add_status, rc); - rc = -ENXIO; - } - return 0; -} -/** - * lpfc_sli4_remove_all_sgl_pages - Post scatter gather list for an XRI to HBA - * @phba: The virtual port for which this call being executed. - * - * This routine will remove all of the sgl pages registered with the hba. - * - * Return codes: - * 0 - Success - * -ENXIO, -ENOMEM - Failure - **/ -int -lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba) -{ - LPFC_MBOXQ_t *mbox; - int rc; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) - return -ENOMEM; - - lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0, - LPFC_SLI4_MBX_EMBED); - if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - else - rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO); - /* The IOCTL status is embedded in the mailbox subheader. 
 */
-	shdr = (union lpfc_sli4_cfg_shdr *)
-		&mbox->u.mqe.un.sli4_config.header.cfg_shdr;
-	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
-	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
-	if (rc != MBX_TIMEOUT)
-		mempool_free(mbox, phba->mbox_mem_pool);
-	if (shdr_status || shdr_add_status || rc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
-				"status x%x add_status x%x, mbx status x%x\n",
-				shdr_status, shdr_add_status, rc);
-		rc = -ENXIO;
-	}
-	return rc;
-}
-
-/**
- * lpfc_sli4_next_xritag - Get an xritag for the io
- * @phba: Pointer to HBA context object.
- *
- * This function allocates the next unused xritag for an iocb. If there is
- * no unused xritag it will return 0xFFFF, which is not a valid xritag.
- * The caller is not required to hold any lock.
- **/
-uint16_t
-lpfc_sli4_next_xritag(struct lpfc_hba *phba)
-{
-	uint16_t xritag;
-
-	spin_lock_irq(&phba->hbalock);
-	xritag = phba->sli4_hba.next_xri;
-	if ((xritag != (uint16_t) -1) && xritag <
-		(phba->sli4_hba.max_cfg_param.max_xri
-			+ phba->sli4_hba.max_cfg_param.xri_base)) {
-		phba->sli4_hba.next_xri++;
-		phba->sli4_hba.max_cfg_param.xri_used++;
-		spin_unlock_irq(&phba->hbalock);
-		return xritag;
-	}
-	spin_unlock_irq(&phba->hbalock);
-
-	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-			"2004 Failed to allocate XRI. Last XRITAG is %d,"
-			" Max XRI is %d, Used XRI is %d\n",
-			phba->sli4_hba.next_xri,
-			phba->sli4_hba.max_cfg_param.max_xri,
-			phba->sli4_hba.max_cfg_param.xri_used);
-	return -1;
-}
-
-/**
- * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to post a block of the driver's sgl pages to the
- * HBA using a non-embedded mailbox command. No lock is held. This routine
- * is only called when the driver is loading and after all IO has been
- * stopped.
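The routine that follows sizes its DMA request as one page-pair entry per SGL plus the configuration header and one trailing word, and rejects anything larger than a page. A compilable sketch of that arithmetic with stand-in 16-byte structures; under those assumptions 254 entries is the largest count that fits a 4KB page, which lines up with the LPFC_NEMBED_MBOX_SGL_CNT define of 254 in lpfc_sli4.h further down.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096			/* assumed for the sketch */

/* Stand-ins for the driver structures; both sized at four words here. */
struct sgl_page_pair { uint32_t pg0_lo, pg0_hi, pg1_lo, pg1_hi; };
struct cfg_shdr { uint32_t words[4]; };

static long post_sgl_reqlen(unsigned int sgl_count)
{
	size_t reqlen = sgl_count * sizeof(struct sgl_page_pair) +
			sizeof(struct cfg_shdr) + sizeof(uint32_t);

	if (reqlen > PAGE_SIZE)
		return -1;		/* the driver fails this with -ENOMEM */
	return (long)reqlen;
}

int main(void)
{
	printf("%ld\n", post_sgl_reqlen(254));	/* 4084: fits in one page */
	printf("%ld\n", post_sgl_reqlen(255));	/* -1: exceeds the page */
	return 0;
}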
- **/ -int -lpfc_sli4_post_sgl_list(struct lpfc_hba *phba) -{ - struct lpfc_sglq *sglq_entry; - struct lpfc_mbx_post_uembed_sgl_page1 *sgl; - struct sgl_page_pairs *sgl_pg_pairs; - void *viraddr; - LPFC_MBOXQ_t *mbox; - uint32_t reqlen, alloclen, pg_pairs; - uint32_t mbox_tmo; - uint16_t xritag_start = 0; - int els_xri_cnt, rc = 0; - uint32_t shdr_status, shdr_add_status; - union lpfc_sli4_cfg_shdr *shdr; - - /* The number of sgls to be posted */ - els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba); - - reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) + - sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); - if (reqlen > PAGE_SIZE) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "2559 Block sgl registration required DMA " - "size (%d) great than a page\n", reqlen); - return -ENOMEM; - } - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2560 Failed to allocate mbox cmd memory\n"); - return -ENOMEM; - } - - /* Allocate DMA memory and set up the non-embedded mailbox command */ - alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, - LPFC_SLI4_MBX_NEMBED); - - if (alloclen < reqlen) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0285 Allocated DMA memory size (%d) is " - "less than the requested DMA memory " - "size (%d)\n", alloclen, reqlen); - lpfc_sli4_mbox_cmd_free(phba, mbox); - return -ENOMEM; - } - - /* Get the first SGE entry from the non-embedded DMA memory */ - if (unlikely(!mbox->sge_array)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, - "2525 Failed to get the non-embedded SGE " - "virtual address\n"); - lpfc_sli4_mbox_cmd_free(phba, mbox); - return -ENOMEM; - } - viraddr = mbox->sge_array->addr[0]; - - /* Set up the SGL pages in the non-embedded DMA pages */ - sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; - sgl_pg_pairs = &sgl->sgl_pg_pairs; - - for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) { - sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs]; - /* Set up the sge entry */ - sgl_pg_pairs->sgl_pg0_addr_lo = - cpu_to_le32(putPaddrLow(sglq_entry->phys)); - sgl_pg_pairs->sgl_pg0_addr_hi = - cpu_to_le32(putPaddrHigh(sglq_entry->phys)); - sgl_pg_pairs->sgl_pg1_addr_lo = - cpu_to_le32(putPaddrLow(0)); - sgl_pg_pairs->sgl_pg1_addr_hi = - cpu_to_le32(putPaddrHigh(0)); - /* Keep the first xritag on the list */ - if (pg_pairs == 0) - xritag_start = sglq_entry->sli4_xritag; - sgl_pg_pairs++; - } - bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); - pg_pairs = (pg_pairs > 0) ? 
(pg_pairs - 1) : pg_pairs; - bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); - /* Perform endian conversion if necessary */ - sgl->word0 = cpu_to_le32(sgl->word0); - - if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - else { - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); - rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); - } - shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (rc != MBX_TIMEOUT) - lpfc_sli4_mbox_cmd_free(phba, mbox); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2513 POST_SGL_BLOCK mailbox command failed " - "status x%x add_status x%x mbx status x%x\n", - shdr_status, shdr_add_status, rc); - rc = -ENXIO; - } - return rc; -} - -/** - * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware - * @phba: pointer to lpfc hba data structure. - * @sblist: pointer to scsi buffer list. - * @count: number of scsi buffers on the list. - * - * This routine is invoked to post a block of @count scsi sgl pages from a - * SCSI buffer list @sblist to the HBA using non-embedded mailbox command. - * No Lock is held. - * - **/ -int -lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, - int cnt) -{ - struct lpfc_scsi_buf *psb; - struct lpfc_mbx_post_uembed_sgl_page1 *sgl; - struct sgl_page_pairs *sgl_pg_pairs; - void *viraddr; - LPFC_MBOXQ_t *mbox; - uint32_t reqlen, alloclen, pg_pairs; - uint32_t mbox_tmo; - uint16_t xritag_start = 0; - int rc = 0; - uint32_t shdr_status, shdr_add_status; - dma_addr_t pdma_phys_bpl1; - union lpfc_sli4_cfg_shdr *shdr; - - /* Calculate the requested length of the dma memory */ - reqlen = cnt * sizeof(struct sgl_page_pairs) + - sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t); - if (reqlen > PAGE_SIZE) { - lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, - "0217 Block sgl registration required DMA " - "size (%d) great than a page\n", reqlen); - return -ENOMEM; - } - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mbox) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0283 Failed to allocate mbox cmd memory\n"); - return -ENOMEM; - } - - /* Allocate DMA memory and set up the non-embedded mailbox command */ - alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen, - LPFC_SLI4_MBX_NEMBED); - - if (alloclen < reqlen) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2561 Allocated DMA memory size (%d) is " - "less than the requested DMA memory " - "size (%d)\n", alloclen, reqlen); - lpfc_sli4_mbox_cmd_free(phba, mbox); - return -ENOMEM; - } - - /* Get the first SGE entry from the non-embedded DMA memory */ - if (unlikely(!mbox->sge_array)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, - "2565 Failed to get the non-embedded SGE " - "virtual address\n"); - lpfc_sli4_mbox_cmd_free(phba, mbox); - return -ENOMEM; - } - viraddr = mbox->sge_array->addr[0]; - - /* Set up the SGL pages in the non-embedded DMA pages */ - sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr; - sgl_pg_pairs = &sgl->sgl_pg_pairs; - - pg_pairs = 0; - list_for_each_entry(psb, sblist, list) { - /* Set up the sge entry */ - sgl_pg_pairs->sgl_pg0_addr_lo = - cpu_to_le32(putPaddrLow(psb->dma_phys_bpl)); - sgl_pg_pairs->sgl_pg0_addr_hi = - cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl)); - if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) - pdma_phys_bpl1 = 
psb->dma_phys_bpl + SGL_PAGE_SIZE; - else - pdma_phys_bpl1 = 0; - sgl_pg_pairs->sgl_pg1_addr_lo = - cpu_to_le32(putPaddrLow(pdma_phys_bpl1)); - sgl_pg_pairs->sgl_pg1_addr_hi = - cpu_to_le32(putPaddrHigh(pdma_phys_bpl1)); - /* Keep the first xritag on the list */ - if (pg_pairs == 0) - xritag_start = psb->cur_iocbq.sli4_xritag; - sgl_pg_pairs++; - pg_pairs++; - } - bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start); - bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs); - /* Perform endian conversion if necessary */ - sgl->word0 = cpu_to_le32(sgl->word0); - - if (!phba->sli4_hba.intr_enable) - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); - else { - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); - rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); - } - shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - if (rc != MBX_TIMEOUT) - lpfc_sli4_mbox_cmd_free(phba, mbox); - if (shdr_status || shdr_add_status || rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2564 POST_SGL_BLOCK mailbox command failed " - "status x%x add_status x%x mbx status x%x\n", - shdr_status, shdr_add_status, rc); - rc = -ENXIO; - } - return rc; -} - -/** - * lpfc_fc_frame_check - Check that this frame is a valid frame to handle - * @phba: pointer to lpfc_hba struct that the frame was received on - * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) - * - * This function checks the fields in the @fc_hdr to see if the FC frame is a - * valid type of frame that the LPFC driver will handle. This function will - * return a zero if the frame is a valid frame or a non zero value when the - * frame does not pass the check. - **/ -static int -lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) -{ - char *rctl_names[] = FC_RCTL_NAMES_INIT; - char *type_names[] = FC_TYPE_NAMES_INIT; - struct fc_vft_header *fc_vft_hdr; - - switch (fc_hdr->fh_r_ctl) { - case FC_RCTL_DD_UNCAT: /* uncategorized information */ - case FC_RCTL_DD_SOL_DATA: /* solicited data */ - case FC_RCTL_DD_UNSOL_CTL: /* unsolicited control */ - case FC_RCTL_DD_SOL_CTL: /* solicited control or reply */ - case FC_RCTL_DD_UNSOL_DATA: /* unsolicited data */ - case FC_RCTL_DD_DATA_DESC: /* data descriptor */ - case FC_RCTL_DD_UNSOL_CMD: /* unsolicited command */ - case FC_RCTL_DD_CMD_STATUS: /* command status */ - case FC_RCTL_ELS_REQ: /* extended link services request */ - case FC_RCTL_ELS_REP: /* extended link services reply */ - case FC_RCTL_ELS4_REQ: /* FC-4 ELS request */ - case FC_RCTL_ELS4_REP: /* FC-4 ELS reply */ - case FC_RCTL_BA_NOP: /* basic link service NOP */ - case FC_RCTL_BA_ABTS: /* basic link service abort */ - case FC_RCTL_BA_RMC: /* remove connection */ - case FC_RCTL_BA_ACC: /* basic accept */ - case FC_RCTL_BA_RJT: /* basic reject */ - case FC_RCTL_BA_PRMT: - case FC_RCTL_ACK_1: /* acknowledge_1 */ - case FC_RCTL_ACK_0: /* acknowledge_0 */ - case FC_RCTL_P_RJT: /* port reject */ - case FC_RCTL_F_RJT: /* fabric reject */ - case FC_RCTL_P_BSY: /* port busy */ - case FC_RCTL_F_BSY: /* fabric busy to data frame */ - case FC_RCTL_F_BSYL: /* fabric busy to link control frame */ - case FC_RCTL_LCR: /* link credit reset */ - case FC_RCTL_END: /* end */ - break; - case FC_RCTL_VFTH: /* Virtual Fabric tagging Header */ - fc_vft_hdr = (struct fc_vft_header *)fc_hdr; - fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1]; - return lpfc_fc_frame_check(phba, fc_hdr); - default: - goto 
drop; - } - switch (fc_hdr->fh_type) { - case FC_TYPE_BLS: - case FC_TYPE_ELS: - case FC_TYPE_FCP: - case FC_TYPE_CT: - break; - case FC_TYPE_IP: - case FC_TYPE_ILS: - default: - goto drop; - } - lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "2538 Received frame rctl:%s type:%s\n", - rctl_names[fc_hdr->fh_r_ctl], - type_names[fc_hdr->fh_type]); - return 0; -drop: - lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, - "2539 Dropped frame rctl:%s type:%s\n", - rctl_names[fc_hdr->fh_r_ctl], - type_names[fc_hdr->fh_type]); - return 1; -} - -/** - * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame - * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) - * - * This function processes the FC header to retrieve the VFI from the VF - * header, if one exists. This function will return the VFI if one exists - * or 0 if no VSAN Header exists. - **/ -static uint32_t -lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr) -{ - struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr; - - if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH) - return 0; - return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr); -} - -/** - * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to - * @phba: Pointer to the HBA structure to search for the vport on - * @fc_hdr: A pointer to the FC Header data (In Big Endian Format) - * @fcfi: The FC Fabric ID that the frame came from - * - * This function searches the @phba for a vport that matches the content of the - * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the - * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function - * returns the matching vport pointer or NULL if unable to match frame to a - * vport. - **/ -static struct lpfc_vport * -lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, - uint16_t fcfi) -{ - struct lpfc_vport **vports; - struct lpfc_vport *vport = NULL; - int i; - uint32_t did = (fc_hdr->fh_d_id[0] << 16 | - fc_hdr->fh_d_id[1] << 8 | - fc_hdr->fh_d_id[2]); - - vports = lpfc_create_vport_work_array(phba); - if (vports != NULL) - for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { - if (phba->fcf.fcfi == fcfi && - vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) && - vports[i]->fc_myDID == did) { - vport = vports[i]; - break; - } - } - lpfc_destroy_vport_work_array(phba, vports); - return vport; -} - -/** - * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences - * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame - * - * This function searches through the existing incomplete sequences that have - * been sent to this @vport. If the frame matches one of the incomplete - * sequences then the dbuf in the @dmabuf is added to the list of frames that - * make up that sequence. If no sequence is found that matches this frame then - * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list - * This function returns a pointer to the first dmabuf in the sequence list that - * the frame was linked to. 
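lpfc_fc_frame_to_vport above folds the three bytes of fh_d_id into a 24-bit destination ID before comparing it against each vport's fc_myDID. The same shift-and-or, extracted into a standalone helper:

#include <stdint.h>
#include <stdio.h>

/* Assemble the 24-bit FC destination ID from the three header bytes,
 * exactly as the vport lookup above does. */
static uint32_t fc_did(const uint8_t d_id[3])
{
	return ((uint32_t)d_id[0] << 16) |
	       ((uint32_t)d_id[1] << 8) |
		(uint32_t)d_id[2];
}

int main(void)
{
	const uint8_t fabric_ctlr[3] = { 0xFF, 0xFF, 0xFD };	/* well-known address */

	printf("did = 0x%06X\n", fc_did(fabric_ctlr));	/* did = 0xFFFFFD */
	return 0;
}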
- **/ -static struct hbq_dmabuf * -lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf) -{ - struct fc_frame_header *new_hdr; - struct fc_frame_header *temp_hdr; - struct lpfc_dmabuf *d_buf; - struct lpfc_dmabuf *h_buf; - struct hbq_dmabuf *seq_dmabuf = NULL; - struct hbq_dmabuf *temp_dmabuf = NULL; - - new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; - /* Use the hdr_buf to find the sequence that this frame belongs to */ - list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) { - temp_hdr = (struct fc_frame_header *)h_buf->virt; - if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) || - (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) || - (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3))) - continue; - /* found a pending sequence that matches this frame */ - seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf); - break; - } - if (!seq_dmabuf) { - /* - * This indicates first frame received for this sequence. - * Queue the buffer on the vport's rcv_buffer_list. - */ - list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list); - return dmabuf; - } - temp_hdr = seq_dmabuf->hbuf.virt; - if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) { - list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list); - return dmabuf; - } - /* find the correct place in the sequence to insert this frame */ - list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) { - temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); - temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt; - /* - * If the frame's sequence count is greater than the frame on - * the list then insert the frame right after this frame - */ - if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) { - list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list); - return seq_dmabuf; - } - } - return NULL; -} - -/** - * lpfc_seq_complete - Indicates if a sequence is complete - * @dmabuf: pointer to a dmabuf that describes the FC sequence - * - * This function checks the sequence, starting with the frame described by - * @dmabuf, to see if all the frames associated with this sequence are present. - * the frames associated with this sequence are linked to the @dmabuf using the - * dbuf list. This function looks for two major things. 1) That the first frame - * has a sequence count of zero. 2) There is a frame with last frame of sequence - * set. 3) That there are no holes in the sequence count. The function will - * return 1 when the sequence is complete, otherwise it will return 0. - **/ -static int -lpfc_seq_complete(struct hbq_dmabuf *dmabuf) -{ - struct fc_frame_header *hdr; - struct lpfc_dmabuf *d_buf; - struct hbq_dmabuf *seq_dmabuf; - uint32_t fctl; - int seq_count = 0; - - hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; - /* make sure first fame of sequence has a sequence count of zero */ - if (hdr->fh_seq_cnt != seq_count) - return 0; - fctl = (hdr->fh_f_ctl[0] << 16 | - hdr->fh_f_ctl[1] << 8 | - hdr->fh_f_ctl[2]); - /* If last frame of sequence we can return success. */ - if (fctl & FC_FC_END_SEQ) - return 1; - list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) { - seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf); - hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; - /* If there is a hole in the sequence count then fail. */ - if (++seq_count != hdr->fh_seq_cnt) - return 0; - fctl = (hdr->fh_f_ctl[0] << 16 | - hdr->fh_f_ctl[1] << 8 | - hdr->fh_f_ctl[2]); - /* If last frame of sequence we can return success. 
*/ - if (fctl & FC_FC_END_SEQ) - return 1; - } - return 0; -} - -/** - * lpfc_prep_seq - Prep sequence for ULP processing - * @vport: Pointer to the vport on which this sequence was received - * @dmabuf: pointer to a dmabuf that describes the FC sequence - * - * This function takes a sequence, described by a list of frames, and creates - * a list of iocbq structures to describe the sequence. This iocbq list will be - * used to issue to the generic unsolicited sequence handler. This routine - * returns a pointer to the first iocbq in the list. If the function is unable - * to allocate an iocbq then it throw out the received frames that were not - * able to be described and return a pointer to the first iocbq. If unable to - * allocate any iocbqs (including the first) this function will return NULL. - **/ -static struct lpfc_iocbq * -lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf) -{ - struct lpfc_dmabuf *d_buf, *n_buf; - struct lpfc_iocbq *first_iocbq, *iocbq; - struct fc_frame_header *fc_hdr; - uint32_t sid; - - fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; - /* remove from receive buffer list */ - list_del_init(&seq_dmabuf->hbuf.list); - /* get the Remote Port's SID */ - sid = (fc_hdr->fh_s_id[0] << 16 | - fc_hdr->fh_s_id[1] << 8 | - fc_hdr->fh_s_id[2]); - /* Get an iocbq struct to fill in. */ - first_iocbq = lpfc_sli_get_iocbq(vport->phba); - if (first_iocbq) { - /* Initialize the first IOCB. */ - first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS; - first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX; - first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id); - first_iocbq->iocb.unsli3.rcvsli3.vpi = - vport->vpi + vport->phba->vpi_base; - /* put the first buffer into the first IOCBq */ - first_iocbq->context2 = &seq_dmabuf->dbuf; - first_iocbq->context3 = NULL; - first_iocbq->iocb.ulpBdeCount = 1; - first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = - LPFC_DATA_BUF_SIZE; - first_iocbq->iocb.un.rcvels.remoteID = sid; - } - iocbq = first_iocbq; - /* - * Each IOCBq can have two Buffers assigned, so go through the list - * of buffers for this sequence and save two buffers in each IOCBq - */ - list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) { - if (!iocbq) { - lpfc_in_buf_free(vport->phba, d_buf); - continue; - } - if (!iocbq->context3) { - iocbq->context3 = d_buf; - iocbq->iocb.ulpBdeCount++; - iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize = - LPFC_DATA_BUF_SIZE; - } else { - iocbq = lpfc_sli_get_iocbq(vport->phba); - if (!iocbq) { - if (first_iocbq) { - first_iocbq->iocb.ulpStatus = - IOSTAT_FCP_RSP_ERROR; - first_iocbq->iocb.un.ulpWord[4] = - IOERR_NO_RESOURCES; - } - lpfc_in_buf_free(vport->phba, d_buf); - continue; - } - iocbq->context2 = d_buf; - iocbq->context3 = NULL; - iocbq->iocb.ulpBdeCount = 1; - iocbq->iocb.un.cont64[0].tus.f.bdeSize = - LPFC_DATA_BUF_SIZE; - iocbq->iocb.un.rcvels.remoteID = sid; - list_add_tail(&iocbq->list, &first_iocbq->list); - } - } - return first_iocbq; -} - -/** - * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware - * @phba: Pointer to HBA context object. - * - * This function is called with no lock held. This function processes all - * the received buffers and gives it to upper layers when a received buffer - * indicates that it is the final frame in the sequence. The interrupt - * service routine processes received buffers at interrupt contexts and adds - * received dma buffers to the rb_pend_list queue and signals the worker thread. 
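The completeness test in lpfc_seq_complete reduces to a walk over the per-frame SEQ_CNT values: the first must be zero, the counts must be contiguous, and some frame must carry the end-of-sequence flag. A self-contained model; the END_SEQ bit position here is arbitrary, whereas the driver tests FC_FC_END_SEQ against the assembled fh_f_ctl word.

#include <stdint.h>
#include <stdio.h>

#define END_SEQ 0x1u	/* arbitrary flag bit for the model */

struct frame { uint16_t seq_cnt; uint32_t fctl; };

/* The three checks from lpfc_seq_complete: SEQ_CNT starts at zero,
 * stays contiguous, and some frame carries the end-of-sequence bit. */
static int seq_complete(const struct frame *f, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (f[i].seq_cnt != (uint16_t)i)
			return 0;	/* missing first frame or a hole */
		if (f[i].fctl & END_SEQ)
			return 1;	/* last frame of the sequence seen */
	}
	return 0;			/* no END_SEQ frame yet */
}

int main(void)
{
	struct frame seq[3] = { { 0, 0 }, { 1, 0 }, { 2, END_SEQ } };

	printf("%d\n", seq_complete(seq, 3));	/* prints 1 */
	return 0;
}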
- * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the - * appropriate receive function when the final frame in a sequence is received. - **/ -int -lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba) -{ - LIST_HEAD(cmplq); - struct hbq_dmabuf *dmabuf, *seq_dmabuf; - struct fc_frame_header *fc_hdr; - struct lpfc_vport *vport; - uint32_t fcfi; - struct lpfc_iocbq *iocbq; - - /* Clear hba flag and get all received buffers into the cmplq */ - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~HBA_RECEIVE_BUFFER; - list_splice_init(&phba->rb_pend_list, &cmplq); - spin_unlock_irq(&phba->hbalock); - - /* Process each received buffer */ - while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) { - fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt; - /* check to see if this a valid type of frame */ - if (lpfc_fc_frame_check(phba, fc_hdr)) { - lpfc_in_buf_free(phba, &dmabuf->dbuf); - continue; - } - fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe); - vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi); - if (!vport) { - /* throw out the frame */ - lpfc_in_buf_free(phba, &dmabuf->dbuf); - continue; - } - /* Link this frame */ - seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf); - if (!seq_dmabuf) { - /* unable to add frame to vport - throw it out */ - lpfc_in_buf_free(phba, &dmabuf->dbuf); - continue; - } - /* If not last frame in sequence continue processing frames. */ - if (!lpfc_seq_complete(seq_dmabuf)) { - /* - * When saving off frames post a new one and mark this - * frame to be freed when it is finished. - **/ - lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1); - dmabuf->tag = -1; - continue; - } - fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt; - iocbq = lpfc_prep_seq(vport, seq_dmabuf); - if (!lpfc_complete_unsol_iocb(phba, - &phba->sli.ring[LPFC_ELS_RING], - iocbq, fc_hdr->fh_r_ctl, - fc_hdr->fh_type)) - lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, - "2540 Ring %d handler: unexpected Rctl " - "x%x Type x%x received\n", - LPFC_ELS_RING, - fc_hdr->fh_r_ctl, fc_hdr->fh_type); - }; - return 0; -} - -/** - * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port - * @phba: pointer to lpfc hba data structure. - * - * This routine is invoked to post rpi header templates to the - * HBA consistent with the SLI-4 interface spec. This routine - * posts a PAGE_SIZE memory region to the port to hold up to - * PAGE_SIZE modulo 64 rpi context headers. - * - * This routine does not require any locks. It's usage is expected - * to be driver load or reset recovery when the driver is - * sequential. - * - * Return codes - * 0 - sucessful - * EIO - The mailbox failed to complete successfully. - * When this error occurs, the driver is not guaranteed - * to have any rpi regions posted to the device and - * must either attempt to repost the regions or take a - * fatal error. - **/ -int -lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba) -{ - struct lpfc_rpi_hdr *rpi_page; - uint32_t rc = 0; - - /* Post all rpi memory regions to the port. */ - list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) { - rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page); - if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2008 Error %d posting all rpi " - "headers\n", rc); - rc = -EIO; - break; - } - } - - return rc; -} - -/** - * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port - * @phba: pointer to lpfc hba data structure. - * @rpi_page: pointer to the rpi memory region. 
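If "PAGE_SIZE modulo 64" above is read as PAGE_SIZE divided by 64, which agrees with the "maps up to 64 rpi context regions" wording in the following comment for a 4KB page, the capacity of one posted region works out as:

#include <stdio.h>

#define PAGE_SIZE	4096	/* assumed for the sketch */
#define RPI_HDR_BYTES	  64	/* implied by "64 rpi context regions" below */

int main(void)
{
	/* One posted page maps PAGE_SIZE / RPI_HDR_BYTES context headers. */
	printf("rpi headers per page: %d\n", PAGE_SIZE / RPI_HDR_BYTES);	/* 64 */
	return 0;
}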
- *
- * This routine is invoked to post a single rpi header to the
- * HBA consistent with the SLI-4 interface spec. This memory region
- * maps up to 64 rpi context regions.
- *
- * Return codes
- * 	0 - successful
- * 	ENOMEM - No available memory
- * 	EIO - The mailbox failed to complete successfully.
- **/
-int
-lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
-{
-	LPFC_MBOXQ_t *mboxq;
-	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
-	uint32_t rc = 0;
-	uint32_t mbox_tmo;
-	uint32_t shdr_status, shdr_add_status;
-	union lpfc_sli4_cfg_shdr *shdr;
-
-	/* The port is notified of the header region via a mailbox command. */
-	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!mboxq) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"2001 Unable to allocate memory for issuing "
-				"SLI_CONFIG_SPECIAL mailbox command\n");
-		return -ENOMEM;
-	}
-
-	/* Post all rpi memory regions to the port. */
-	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
-	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
-	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
-			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
-			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
-			 sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
-	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
-	       hdr_tmpl, rpi_page->page_count);
-	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
-	       rpi_page->start_rpi);
-	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
-	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
-	if (!phba->sli4_hba.intr_enable)
-		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
-	else
-		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
-	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
-	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
-	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
-	if (rc != MBX_TIMEOUT)
-		mempool_free(mboxq, phba->mbox_mem_pool);
-	if (shdr_status || shdr_add_status || rc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"2514 POST_RPI_HDR mailbox failed with "
-				"status x%x add_status x%x, mbx status x%x\n",
-				shdr_status, shdr_add_status, rc);
-		rc = -ENXIO;
-	}
-	return rc;
-}
-
-/**
- * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to allocate an available rpi from the driver's
- * rpi bitmask. When the pool of remaining rpis runs low, it also attempts
- * to post another rpi header page to the port.
- *
- * Returns
- * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
- * 	LPFC_RPI_ALLOC_ERROR if no rpis are available.
- **/
-int
-lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
-{
-	int rpi;
-	uint16_t max_rpi, rpi_base, rpi_limit;
-	uint16_t rpi_remaining;
-	struct lpfc_rpi_hdr *rpi_hdr;
-
-	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
-	rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
-	rpi_limit = phba->sli4_hba.next_rpi;
-
-	/*
-	 * The valid rpi range is not guaranteed to be zero-based. Start
-	 * the search at the rpi_base as reported by the port.
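The allocation step after this comment is an ordinary bitmap search: find the first clear bit at or above rpi_base, set it, and account for it, or fail. A userspace model of the find_next_zero_bit plus set_bit pair, with illustrative sizes:

#include <stdint.h>
#include <stdio.h>

#define MAX_IDS 128

static uint64_t bmask[MAX_IDS / 64];

/* Scan from 'base' for the first clear bit under 'limit', mark it
 * used, or report failure. */
static int alloc_id(int base, int limit)
{
	int i;

	for (i = base; i < limit; i++) {
		if (!(bmask[i / 64] & (1ULL << (i % 64)))) {
			bmask[i / 64] |= 1ULL << (i % 64);	/* set_bit */
			return i;
		}
	}
	return -1;	/* stands in for LPFC_RPI_ALLOC_ERROR */
}

int main(void)
{
	int a = alloc_id(8, MAX_IDS);
	int b = alloc_id(8, MAX_IDS);

	printf("%d %d\n", a, b);	/* prints 8 9 */
	return 0;
}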
-	 */
-	spin_lock_irq(&phba->hbalock);
-	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
-	if (rpi >= rpi_limit || rpi < rpi_base)
-		rpi = LPFC_RPI_ALLOC_ERROR;
-	else {
-		set_bit(rpi, phba->sli4_hba.rpi_bmask);
-		phba->sli4_hba.max_cfg_param.rpi_used++;
-		phba->sli4_hba.rpi_count++;
-	}
-
-	/*
-	 * Don't try to allocate more rpi header regions if the device's limit
-	 * on available rpis has been exhausted.
-	 */
-	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
-	    (phba->sli4_hba.rpi_count >= max_rpi)) {
-		spin_unlock_irq(&phba->hbalock);
-		return rpi;
-	}
-
-	/*
-	 * If the driver is running low on rpi resources, allocate another
-	 * page now. Note that the next_rpi value is used because
-	 * it represents how many are actually in use whereas max_rpi notes
-	 * how many the device supports at most.
-	 */
-	rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
-		phba->sli4_hba.rpi_count;
-	spin_unlock_irq(&phba->hbalock);
-	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
-		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
-		if (!rpi_hdr) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-					"2002 Error Could not grow rpi "
-					"count\n");
-		} else {
-			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
-		}
-	}
-
-	return rpi;
-}
-
-/**
- * lpfc_sli4_free_rpi - Release an rpi for reuse.
- * @phba: pointer to lpfc hba data structure.
- * @rpi: the rpi value to release.
- *
- * This routine is invoked to release an rpi to the pool of
- * available rpis maintained by the driver.
- **/
-void
-lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
-{
-	spin_lock_irq(&phba->hbalock);
-	clear_bit(rpi, phba->sli4_hba.rpi_bmask);
-	phba->sli4_hba.rpi_count--;
-	phba->sli4_hba.max_cfg_param.rpi_used--;
-	spin_unlock_irq(&phba->hbalock);
-}
-
-/**
- * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to remove the memory region that
- * provides rpis via a bitmask.
- **/
-void
-lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
-{
-	kfree(phba->sli4_hba.rpi_bmask);
-}
-
-/**
- * lpfc_sli4_resume_rpi - Resume an rpi on the port
- * @ndlp: pointer to the node whose rpi is to be resumed.
- *
- * This routine is invoked to tell the port to resume processing
- * for the rpi associated with @ndlp.
- **/
-int
-lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
-{
-	LPFC_MBOXQ_t *mboxq;
-	struct lpfc_hba *phba = ndlp->phba;
-	int rc;
-
-	/* The port is notified of the rpi resume request via a mailbox
-	 * command. */
-	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!mboxq)
-		return -ENOMEM;
-
-	/* Build and issue the RESUME_RPI mailbox command. */
-	lpfc_resume_rpi(mboxq, ndlp);
-	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
-	if (rc == MBX_NOT_FINISHED) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"2010 Resume RPI Mailbox failed "
-				"status %d, mbxStatus x%x\n", rc,
-				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
-		mempool_free(mboxq, phba->mbox_mem_pool);
-		return -EIO;
-	}
-	return 0;
-}
-
-/**
- * lpfc_sli4_init_vpi - Initialize a vpi with the port
- * @phba: pointer to lpfc hba data structure.
- * @vpi: vpi value to activate with the port.
- *
- * This routine is invoked to activate a vpi with the
- * port when the host intends to use vports with a
- * nonzero vpi.
- * - * Returns: - * 0 success - * -Evalue otherwise - **/ -int -lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi) -{ - LPFC_MBOXQ_t *mboxq; - int rc = 0; - uint32_t mbox_tmo; - - if (vpi == 0) - return -EINVAL; - mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mboxq) - return -ENOMEM; - lpfc_init_vpi(mboxq, vpi); - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); - rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); - if (rc != MBX_TIMEOUT) - mempool_free(mboxq, phba->mbox_mem_pool); - if (rc != MBX_SUCCESS) { - lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "2022 INIT VPI Mailbox failed " - "status %d, mbxStatus x%x\n", rc, - bf_get(lpfc_mqe_status, &mboxq->u.mqe)); - rc = -EIO; - } - return rc; -} - -/** - * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler. - * @phba: pointer to lpfc hba data structure. - * @mboxq: Pointer to mailbox object. - * - * This routine is invoked to manually add a single FCF record. The caller - * must pass a completely initialized FCF_Record. This routine takes - * care of the nonembedded mailbox operations. - **/ -static void -lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) -{ - void *virt_addr; - union lpfc_sli4_cfg_shdr *shdr; - uint32_t shdr_status, shdr_add_status; - - virt_addr = mboxq->sge_array->addr[0]; - /* The IOCTL status is embedded in the mailbox subheader. */ - shdr = (union lpfc_sli4_cfg_shdr *) virt_addr; - shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); - shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); - - if ((shdr_status || shdr_add_status) && - (shdr_status != STATUS_FCF_IN_USE)) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2558 ADD_FCF_RECORD mailbox failed with " - "status x%x add_status x%x\n", - shdr_status, shdr_add_status); - - lpfc_sli4_mbox_cmd_free(phba, mboxq); -} - -/** - * lpfc_sli4_add_fcf_record - Manually add an FCF Record. - * @phba: pointer to lpfc hba data structure. - * @fcf_record: pointer to the initialized fcf record to add. - * - * This routine is invoked to manually add a single FCF record. The caller - * must pass a completely initialized FCF_Record. This routine takes - * care of the nonembedded mailbox operations. - **/ -int -lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record) -{ - int rc = 0; - LPFC_MBOXQ_t *mboxq; - uint8_t *bytep; - void *virt_addr; - dma_addr_t phys_addr; - struct lpfc_mbx_sge sge; - uint32_t alloc_len, req_len; - uint32_t fcfindex; - - mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mboxq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2009 Failed to allocate mbox for ADD_FCF cmd\n"); - return -ENOMEM; - } - - req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) + - sizeof(uint32_t); - - /* Allocate DMA memory and set up the non-embedded mailbox command */ - alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_ADD_FCF, - req_len, LPFC_SLI4_MBX_NEMBED); - if (alloc_len < req_len) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2523 Allocated DMA memory size (x%x) is " - "less than the requested DMA memory " - "size (x%x)\n", alloc_len, req_len); - lpfc_sli4_mbox_cmd_free(phba, mboxq); - return -ENOMEM; - } - - /* - * Get the first SGE entry from the non-embedded DMA memory. This - * routine only uses a single SGE. 
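The copy sequence that follows lays the non-embedded payload out in order: configuration header, then the 32-bit FCF index, then the record itself. A sketch with stand-in 16-byte structures; note that the driver copies through lpfc_sli_pcimem_bcopy for endian correctness, which the plain memcpy below does not attempt.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in sizes; the real structures come from the SLI4 definitions. */
struct cfg_shdr { uint32_t words[4]; };	/* 16 bytes */
struct fcf_rec { uint8_t payload[16]; };

int main(void)
{
	uint8_t dma_page[64] = { 0 };
	uint8_t *bytep = dma_page + sizeof(struct cfg_shdr);
	uint32_t fcf_index = 0;			/* the driver's nonFIP default */
	struct fcf_rec rec = { { 0xAB } };

	/* The FCF index word sits right after the config header ... */
	memcpy(bytep, &fcf_index, sizeof(fcf_index));
	bytep += sizeof(uint32_t);
	/* ... and the record itself follows it. */
	memcpy(bytep, &rec, sizeof(rec));

	printf("record offset: %zu\n", (size_t)(bytep - dma_page));	/* 20 */
	return 0;
}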
- */ - lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); - phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); - if (unlikely(!mboxq->sge_array)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, - "2526 Failed to get the non-embedded SGE " - "virtual address\n"); - lpfc_sli4_mbox_cmd_free(phba, mboxq); - return -ENOMEM; - } - virt_addr = mboxq->sge_array->addr[0]; - /* - * Configure the FCF record for FCFI 0. This is the driver's - * hardcoded default and gets used in nonFIP mode. - */ - fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record); - bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); - lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t)); - - /* - * Copy the fcf_index and the FCF Record Data. The data starts after - * the FCoE header plus word10. The data copy needs to be endian - * correct. - */ - bytep += sizeof(uint32_t); - lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record)); - mboxq->vport = phba->pport; - mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record; - rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2515 ADD_FCF_RECORD mailbox failed with " - "status 0x%x\n", rc); - lpfc_sli4_mbox_cmd_free(phba, mboxq); - rc = -EIO; - } else - rc = 0; - - return rc; -} - -/** - * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record. - * @phba: pointer to lpfc hba data structure. - * @fcf_record: pointer to the fcf record to write the default data. - * @fcf_index: FCF table entry index. - * - * This routine is invoked to build the driver's default FCF record. The - * values used are hardcoded. This routine handles memory initialization. - * - **/ -void -lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba, - struct fcf_record *fcf_record, - uint16_t fcf_index) -{ - memset(fcf_record, 0, sizeof(struct fcf_record)); - fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE; - fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER; - fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY; - bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]); - bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]); - bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]); - bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3); - bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4); - bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5); - bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]); - bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]); - bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]); - bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1); - bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index); - bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record, - LPFC_FCF_FPMA | LPFC_FCF_SPMA); - /* Set the VLAN bit map */ - if (phba->valid_vlan) { - fcf_record->vlan_bitmap[phba->vlan_id / 8] - = 1 << (phba->vlan_id % 8); - } -} - -/** - * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record. - * @phba: pointer to lpfc hba data structure. - * @fcf_index: FCF table entry offset. - * - * This routine is invoked to read up to @fcf_num of FCF record from the - * device starting with the given @fcf_index. 
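The VLAN bitmap assignment in lpfc_sli4_build_dflt_fcf_record above packs one bit per VLAN id, eight ids to a byte. Worked through for an arbitrary id:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t vlan_bitmap[512] = { 0 };	/* 4096 VLAN ids, 8 per byte */
	uint16_t vlan_id = 101;

	/* Same placement as the assignment above. */
	vlan_bitmap[vlan_id / 8] = 1 << (vlan_id % 8);

	printf("byte %d = 0x%02x\n", vlan_id / 8, vlan_bitmap[vlan_id / 8]);
	/* prints: byte 12 = 0x20 */
	return 0;
}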
- **/ -int -lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index) -{ - int rc = 0, error; - LPFC_MBOXQ_t *mboxq; - void *virt_addr; - dma_addr_t phys_addr; - uint8_t *bytep; - struct lpfc_mbx_sge sge; - uint32_t alloc_len, req_len; - struct lpfc_mbx_read_fcf_tbl *read_fcf; - - mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!mboxq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2000 Failed to allocate mbox for " - "READ_FCF cmd\n"); - return -ENOMEM; - } - - req_len = sizeof(struct fcf_record) + - sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t); - - /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */ - alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE, - LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len, - LPFC_SLI4_MBX_NEMBED); - - if (alloc_len < req_len) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0291 Allocated DMA memory size (x%x) is " - "less than the requested DMA memory " - "size (x%x)\n", alloc_len, req_len); - lpfc_sli4_mbox_cmd_free(phba, mboxq); - return -ENOMEM; - } - - /* Get the first SGE entry from the non-embedded DMA memory. This - * routine only uses a single SGE. - */ - lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); - phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); - if (unlikely(!mboxq->sge_array)) { - lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, - "2527 Failed to get the non-embedded SGE " - "virtual address\n"); - lpfc_sli4_mbox_cmd_free(phba, mboxq); - return -ENOMEM; - } - virt_addr = mboxq->sge_array->addr[0]; - read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; - - /* Set up command fields */ - bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index); - /* Perform necessary endian conversion */ - bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr); - lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t)); - mboxq->vport = phba->pport; - mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record; - rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - lpfc_sli4_mbox_cmd_free(phba, mboxq); - error = -EIO; - } else - error = 0; - return error; -} +} /* lpfc_intr_handler */ diff --git a/trunk/drivers/scsi/lpfc/lpfc_sli.h b/trunk/drivers/scsi/lpfc/lpfc_sli.h index 7d37eb7459bf..883938652a6a 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_sli.h +++ b/trunk/drivers/scsi/lpfc/lpfc_sli.h @@ -29,23 +29,13 @@ typedef enum _lpfc_ctx_cmd { LPFC_CTX_HOST } lpfc_ctx_cmd; -/* This structure is used to carry the needed response IOCB states */ -struct lpfc_sli4_rspiocb_info { - uint8_t hw_status; - uint8_t bfield; -#define LPFC_XB 0x1 -#define LPFC_PV 0x2 - uint8_t priority; - uint8_t reserved; -}; - /* This structure is used to handle IOCB requests / responses */ struct lpfc_iocbq { /* lpfc_iocbqs are used in double linked lists */ struct list_head list; struct list_head clist; uint16_t iotag; /* pre-assigned IO tag */ - uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. 
 */
+	uint16_t rsvd1;
 
 	IOCB_t iocb;		/* IOCB cmd */
 	uint8_t retry;		/* retry counter for IOCB cmd - if needed */
@@ -75,7 +65,7 @@ struct lpfc_iocbq {
 			   struct lpfc_iocbq *);
 	void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
 			   struct lpfc_iocbq *);
-	struct lpfc_sli4_rspiocb_info sli4_info;
+
 };
 
 #define SLI_IOCB_RET_IOCB 1	/* Return IOCB if cmd ring full */
@@ -91,18 +81,14 @@ struct lpfc_iocbq {
 typedef struct lpfcMboxq {
 	/* MBOXQs are used in single linked lists */
 	struct list_head list;	/* ptr to next mailbox command */
-	union {
-		MAILBOX_t mb;		/* Mailbox cmd */
-		struct lpfc_mqe mqe;
-	} u;
-	struct lpfc_vport *vport;/* virtual port pointer */
+	MAILBOX_t mb;		/* Mailbox cmd */
+	struct lpfc_vport *vport;/* virtual port pointer */
 	void *context1;		/* caller context information */
 	void *context2;		/* caller context information */
 
 	void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
 	uint8_t mbox_flag;
-	struct lpfc_mcqe mcqe;
-	struct lpfc_mbx_nembed_sge_virt *sge_array;
+
 } LPFC_MBOXQ_t;
 
 #define MBX_POLL 1	/* poll mailbox till command done, then
@@ -244,11 +230,10 @@ struct lpfc_sli {
 
 /* Additional sli_flags */
 
 #define LPFC_SLI_MBOX_ACTIVE 0x100	/* HBA mailbox is currently active */
-#define LPFC_SLI_ACTIVE 0x200	/* SLI in firmware is active */
+#define LPFC_SLI2_ACTIVE 0x200	/* SLI2 overlay in firmware is active */
 #define LPFC_PROCESS_LA 0x400	/* Able to process link attention */
 #define LPFC_BLOCK_MGMT_IO 0x800	/* Don't allow mgmt mbx or iocb cmds */
 #define LPFC_MENLO_MAINT 0x1000	/* need for menlo fw download */
-#define LPFC_SLI_ASYNC_MBX_BLK 0x2000	/* Async mailbox is blocked */
 
 	struct lpfc_sli_ring ring[LPFC_MAX_RING];
 	int fcp_ring;	/* ring used for FCP initiator commands */
@@ -276,8 +261,6 @@ struct lpfc_sli {
 
 #define LPFC_MBOX_TMO 30	/* Sec tmo for outstanding mbox command */
-#define LPFC_MBOX_SLI4_CONFIG_TMO 60	/* Sec tmo for outstanding mbox
-					   command */
 #define LPFC_MBOX_TMO_FLASH_CMD 300	/* Sec tmo for outstanding FLASH write
 				* or erase cmds. This is especially
 				* long because of the potential of
diff --git a/trunk/drivers/scsi/lpfc/lpfc_sli4.h b/trunk/drivers/scsi/lpfc/lpfc_sli4.h
deleted file mode 100644
index 5196b46608d7..000000000000
--- a/trunk/drivers/scsi/lpfc/lpfc_sli4.h
+++ /dev/null
@@ -1,467 +0,0 @@
-/*******************************************************************
- * This file is part of the Emulex Linux Device Driver for         *
- * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2009 Emulex.  All rights reserved.                *
- * EMULEX and SLI are trademarks of Emulex.                        *
- * www.emulex.com                                                  *
- *                                                                 *
- * This program is free software; you can redistribute it and/or   *
- * modify it under the terms of version 2 of the GNU General       *
- * Public License as published by the Free Software Foundation.    *
- * This program is distributed in the hope that it will be useful. *
- * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
- * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
- * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
- * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
- * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
- * more details, a copy of which can be found in the file COPYING  *
- * included with this package.
* - *******************************************************************/ - -#define LPFC_ACTIVE_MBOX_WAIT_CNT 100 -#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 -#define LPFC_GET_QE_REL_INT 32 -#define LPFC_RPI_LOW_WATER_MARK 10 -/* Number of SGL entries can be posted in a 4KB nonembedded mbox command */ -#define LPFC_NEMBED_MBOX_SGL_CNT 254 - -/* Multi-queue arrangement for fast-path FCP work queues */ -#define LPFC_FN_EQN_MAX 8 -#define LPFC_SP_EQN_DEF 1 -#define LPFC_FP_EQN_DEF 1 -#define LPFC_FP_EQN_MIN 1 -#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF) - -#define LPFC_FN_WQN_MAX 32 -#define LPFC_SP_WQN_DEF 1 -#define LPFC_FP_WQN_DEF 4 -#define LPFC_FP_WQN_MIN 1 -#define LPFC_FP_WQN_MAX (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF) - -/* - * Provide the default FCF Record attributes used by the driver - * when nonFIP mode is configured and there is no other default - * FCF Record attributes. - */ -#define LPFC_FCOE_FCF_DEF_INDEX 0 -#define LPFC_FCOE_FCF_GET_FIRST 0xFFFF -#define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF - -/* First 3 bytes of default FCF MAC is specified by FC_MAP */ -#define LPFC_FCOE_FCF_MAC3 0xFF -#define LPFC_FCOE_FCF_MAC4 0xFF -#define LPFC_FCOE_FCF_MAC5 0xFE -#define LPFC_FCOE_FCF_MAP0 0x0E -#define LPFC_FCOE_FCF_MAP1 0xFC -#define LPFC_FCOE_FCF_MAP2 0x00 -#define LPFC_FCOE_MAX_RCV_SIZE 0x5AC -#define LPFC_FCOE_FKA_ADV_PER 0 -#define LPFC_FCOE_FIP_PRIORITY 0x80 - -enum lpfc_sli4_queue_type { - LPFC_EQ, - LPFC_GCQ, - LPFC_MCQ, - LPFC_WCQ, - LPFC_RCQ, - LPFC_MQ, - LPFC_WQ, - LPFC_HRQ, - LPFC_DRQ -}; - -/* The queue sub-type defines the functional purpose of the queue */ -enum lpfc_sli4_queue_subtype { - LPFC_NONE, - LPFC_MBOX, - LPFC_FCP, - LPFC_ELS, - LPFC_USOL -}; - -union sli4_qe { - void *address; - struct lpfc_eqe *eqe; - struct lpfc_cqe *cqe; - struct lpfc_mcqe *mcqe; - struct lpfc_wcqe_complete *wcqe_complete; - struct lpfc_wcqe_release *wcqe_release; - struct sli4_wcqe_xri_aborted *wcqe_xri_aborted; - struct lpfc_rcqe_complete *rcqe_complete; - struct lpfc_mqe *mqe; - union lpfc_wqe *wqe; - struct lpfc_rqe *rqe; -}; - -struct lpfc_queue { - struct list_head list; - enum lpfc_sli4_queue_type type; - enum lpfc_sli4_queue_subtype subtype; - struct lpfc_hba *phba; - struct list_head child_list; - uint32_t entry_count; /* Number of entries to support on the queue */ - uint32_t entry_size; /* Size of each queue entry. 
*/ - uint32_t queue_id; /* Queue ID assigned by the hardware */ - struct list_head page_list; - uint32_t page_count; /* Number of pages allocated for this queue */ - - uint32_t host_index; /* The host's index for putting or getting */ - uint32_t hba_index; /* The last known hba index for get or put */ - union sli4_qe qe[1]; /* array to index entries (must be last) */ -}; - -struct lpfc_cq_event { - struct list_head list; - union { - struct lpfc_mcqe mcqe_cmpl; - struct lpfc_acqe_link acqe_link; - struct lpfc_acqe_fcoe acqe_fcoe; - struct lpfc_acqe_dcbx acqe_dcbx; - struct lpfc_rcqe rcqe_cmpl; - struct sli4_wcqe_xri_aborted wcqe_axri; - } cqe; -}; - -struct lpfc_sli4_link { - uint8_t speed; - uint8_t duplex; - uint8_t status; - uint8_t physical; - uint8_t fault; -}; - -struct lpfc_fcf { - uint8_t fabric_name[8]; - uint8_t mac_addr[6]; - uint16_t fcf_indx; - uint16_t fcfi; - uint32_t fcf_flag; -#define FCF_AVAILABLE 0x01 /* FCF available for discovery */ -#define FCF_REGISTERED 0x02 /* FCF registered with FW */ -#define FCF_DISCOVERED 0x04 /* FCF discovery started */ -#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */ -#define FCF_IN_USE 0x10 /* Atleast one discovery completed */ -#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */ - uint32_t priority; - uint32_t addr_mode; - uint16_t vlan_id; -}; - -#define LPFC_REGION23_SIGNATURE "RG23" -#define LPFC_REGION23_VERSION 1 -#define LPFC_REGION23_LAST_REC 0xff -struct lpfc_fip_param_hdr { - uint8_t type; -#define FCOE_PARAM_TYPE 0xA0 - uint8_t length; -#define FCOE_PARAM_LENGTH 2 - uint8_t parm_version; -#define FIPP_VERSION 0x01 - uint8_t parm_flags; -#define lpfc_fip_param_hdr_fipp_mode_SHIFT 6 -#define lpfc_fip_param_hdr_fipp_mode_MASK 0x3 -#define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags -#define FIPP_MODE_ON 0x2 -#define FIPP_MODE_OFF 0x0 -#define FIPP_VLAN_VALID 0x1 -}; - -struct lpfc_fcoe_params { - uint8_t fc_map[3]; - uint8_t reserved1; - uint16_t vlan_tag; - uint8_t reserved[2]; -}; - -struct lpfc_fcf_conn_hdr { - uint8_t type; -#define FCOE_CONN_TBL_TYPE 0xA1 - uint8_t length; /* words */ - uint8_t reserved[2]; -}; - -struct lpfc_fcf_conn_rec { - uint16_t flags; -#define FCFCNCT_VALID 0x0001 -#define FCFCNCT_BOOT 0x0002 -#define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */ -#define FCFCNCT_FBNM_VALID 0x0008 -#define FCFCNCT_SWNM_VALID 0x0010 -#define FCFCNCT_VLAN_VALID 0x0020 -#define FCFCNCT_AM_VALID 0x0040 -#define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */ -#define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */ - - uint16_t vlan_tag; - uint8_t fabric_name[8]; - uint8_t switch_name[8]; -}; - -struct lpfc_fcf_conn_entry { - struct list_head list; - struct lpfc_fcf_conn_rec conn_rec; -}; - -/* - * Define the host's bootstrap mailbox. This structure contains - * the member attributes needed to create, use, and destroy the - * bootstrap mailbox region. - * - * The macro definitions for the bmbx data structure are defined - * in lpfc_hw4.h with the register definition. 
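struct lpfc_queue ends with the pre-C99 one-element-array idiom, which is why its comment insists qe[1] must be last: the allocation is oversized so the array can extend past the declared struct. A generic illustration; toy_queue is an invented type, not part of the driver.

#include <stdio.h>
#include <stdlib.h>

/* Pre-C99 flexible-array idiom modeled on struct lpfc_queue's qe[1]. */
struct toy_queue {
	unsigned int entry_count;
	void *qe[1];			/* must be last */
};

/* 'entries' must be at least 1 for this idiom to be well defined. */
static struct toy_queue *toy_queue_alloc(unsigned int entries)
{
	struct toy_queue *q;

	/* One element already lives inside the struct, so add entries - 1. */
	q = calloc(1, sizeof(*q) + (entries - 1) * sizeof(q->qe[0]));
	if (q)
		q->entry_count = entries;
	return q;
}

int main(void)
{
	struct toy_queue *q = toy_queue_alloc(256);

	printf("%u\n", q ? q->entry_count : 0);	/* prints 256 */
	free(q);
	return 0;
}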
- */ -struct lpfc_bmbx { - struct lpfc_dmabuf *dmabuf; - struct dma_address dma_address; - void *avirt; - dma_addr_t aphys; - uint32_t bmbx_size; -}; - -#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4 - -#define LPFC_EQE_SIZE_4B 4 -#define LPFC_EQE_SIZE_16B 16 -#define LPFC_CQE_SIZE 16 -#define LPFC_WQE_SIZE 64 -#define LPFC_MQE_SIZE 256 -#define LPFC_RQE_SIZE 8 - -#define LPFC_EQE_DEF_COUNT 1024 -#define LPFC_CQE_DEF_COUNT 256 -#define LPFC_WQE_DEF_COUNT 64 -#define LPFC_MQE_DEF_COUNT 16 -#define LPFC_RQE_DEF_COUNT 512 - -#define LPFC_QUEUE_NOARM false -#define LPFC_QUEUE_REARM true - - -/* - * SLI4 CT field defines - */ -#define SLI4_CT_RPI 0 -#define SLI4_CT_VPI 1 -#define SLI4_CT_VFI 2 -#define SLI4_CT_FCFI 3 - -#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000 - -/* - * SLI4 specific data structures - */ -struct lpfc_max_cfg_param { - uint16_t max_xri; - uint16_t xri_base; - uint16_t xri_used; - uint16_t max_rpi; - uint16_t rpi_base; - uint16_t rpi_used; - uint16_t max_vpi; - uint16_t vpi_base; - uint16_t vpi_used; - uint16_t max_vfi; - uint16_t vfi_base; - uint16_t vfi_used; - uint16_t max_fcfi; - uint16_t fcfi_base; - uint16_t fcfi_used; - uint16_t max_eq; - uint16_t max_rq; - uint16_t max_cq; - uint16_t max_wq; -}; - -struct lpfc_hba; -/* SLI4 HBA multi-fcp queue handler struct */ -struct lpfc_fcp_eq_hdl { - uint32_t idx; - struct lpfc_hba *phba; -}; - -/* SLI4 HBA data structure entries */ -struct lpfc_sli4_hba { - void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for - PCI BAR0, config space registers */ - void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for - PCI BAR1, control registers */ - void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for - PCI BAR2, doorbell registers */ - /* BAR0 PCI config space register memory map */ - void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */ - void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */ - void __iomem *ONLINE0regaddr; /* Address to components of internal UE */ - void __iomem *ONLINE1regaddr; /* Address to components of internal UE */ -#define LPFC_ONLINE_NERR 0xFFFFFFFF - void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */ - /* BAR1 FCoE function CSR register memory map */ - void __iomem *STAregaddr; /* Address to HST_STATE register */ - void __iomem *ISRregaddr; /* Address to HST_ISR register */ - void __iomem *IMRregaddr; /* Address to HST_IMR register */ - void __iomem *ISCRregaddr; /* Address to HST_ISCR register */ - /* BAR2 VF-0 doorbell register memory map */ - void __iomem *RQDBregaddr; /* Address to RQ_DOORBELL register */ - void __iomem *WQDBregaddr; /* Address to WQ_DOORBELL register */ - void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */ - void __iomem *MQDBregaddr; /* Address to MQ_DOORBELL register */ - void __iomem *BMBXregaddr; /* Address to BootStrap MBX register */ - - struct msix_entry *msix_entries; - uint32_t cfg_eqn; - struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */ - /* Pointers to the constructed SLI4 queues */ - struct lpfc_queue **fp_eq; /* Fast-path event queue */ - struct lpfc_queue *sp_eq; /* Slow-path event queue */ - struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */ - struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */ - struct lpfc_queue *els_wq; /* Slow-path ELS work queue */ - struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ - struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ - struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */ - struct 
lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */ - struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */ - struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */ - - /* Setup information for various queue parameters */ - int eq_esize; - int eq_ecount; - int cq_esize; - int cq_ecount; - int wq_esize; - int wq_ecount; - int mq_esize; - int mq_ecount; - int rq_esize; - int rq_ecount; -#define LPFC_SP_EQ_MAX_INTR_SEC 10000 -#define LPFC_FP_EQ_MAX_INTR_SEC 10000 - - uint32_t intr_enable; - struct lpfc_bmbx bmbx; - struct lpfc_max_cfg_param max_cfg_param; - uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */ - uint16_t next_rpi; - uint16_t scsi_xri_max; - uint16_t scsi_xri_cnt; - struct list_head lpfc_free_sgl_list; - struct list_head lpfc_sgl_list; - struct lpfc_sglq **lpfc_els_sgl_array; - struct list_head lpfc_abts_els_sgl_list; - struct lpfc_scsi_buf **lpfc_scsi_psb_array; - struct list_head lpfc_abts_scsi_buf_list; - uint32_t total_sglq_bufs; - struct lpfc_sglq **lpfc_sglq_active_list; - struct list_head lpfc_rpi_hdr_list; - unsigned long *rpi_bmask; - uint16_t rpi_count; - struct lpfc_sli4_flags sli4_flags; - struct list_head sp_rspiocb_work_queue; - struct list_head sp_cqe_event_pool; - struct list_head sp_asynce_work_queue; - struct list_head sp_fcp_xri_aborted_work_queue; - struct list_head sp_els_xri_aborted_work_queue; - struct list_head sp_unsol_work_queue; - struct lpfc_sli4_link link_state; - spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ - spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ -}; - -enum lpfc_sge_type { - GEN_BUFF_TYPE, - SCSI_BUFF_TYPE -}; - -struct lpfc_sglq { - /* lpfc_sglqs are used in double linked lists */ - struct list_head list; - struct list_head clist; - enum lpfc_sge_type buff_type; /* is this a scsi sgl */ - uint16_t iotag; /* pre-assigned IO tag */ - uint16_t sli4_xritag; /* pre-assigned XRI, (OXID) tag. */ - struct sli4_sge *sgl; /* pre-assigned SGL */ - void *virt; /* virtual address. 
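The comment on next_xri above states the accounting rule: next_xri - max_cfg_param.xri_base is the number of XRIs in use. A minimal sketch of an allocator obeying that rule; struct xri_pool and xri_alloc() are hypothetical names, not driver code:

#include <stdio.h>
#include <stdint.h>

/* Simplified view of the XRI bookkeeping fields in lpfc_sli4_hba. */
struct xri_pool {
	uint16_t xri_base;	/* first XRI this function owns */
	uint16_t max_xri;	/* how many XRIs it may use */
	uint16_t next_xri;	/* next free XRI, monotonically advancing */
};

/* next_xri - xri_base = used; an XRI is available while used < max. */
static int xri_alloc(struct xri_pool *p, uint16_t *out)
{
	if ((uint16_t)(p->next_xri - p->xri_base) >= p->max_xri)
		return -1;		/* pool exhausted */
	*out = p->next_xri++;
	return 0;
}

int main(void)
{
	struct xri_pool p = { .xri_base = 100, .max_xri = 2, .next_xri = 100 };
	uint16_t xri;

	while (xri_alloc(&p, &xri) == 0)
		printf("allocated XRI %u\n", xri);	/* 100, then 101 */
	return 0;
}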
*/ - dma_addr_t phys; /* physical address */ -}; - -struct lpfc_rpi_hdr { - struct list_head list; - uint32_t len; - struct lpfc_dmabuf *dmabuf; - uint32_t page_count; - uint32_t start_rpi; -}; - -/* - * SLI4 specific function prototypes - */ -int lpfc_pci_function_reset(struct lpfc_hba *); -int lpfc_sli4_hba_setup(struct lpfc_hba *); -int lpfc_sli4_hba_down(struct lpfc_hba *); -int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t, - uint8_t, uint32_t, bool); -void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *); -void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t); -void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t, - struct lpfc_mbx_sge *); - -void lpfc_sli4_hba_reset(struct lpfc_hba *); -struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t, - uint32_t); -void lpfc_sli4_queue_free(struct lpfc_queue *); -uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t); -uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *, - struct lpfc_queue *, uint32_t, uint32_t); -uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *, - struct lpfc_queue *, uint32_t); -uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *, - struct lpfc_queue *, uint32_t); -uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *, - struct lpfc_queue *, struct lpfc_queue *, uint32_t); -uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); -uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); -uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *); -uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *); -uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *, - struct lpfc_queue *); -int lpfc_sli4_queue_setup(struct lpfc_hba *); -void lpfc_sli4_queue_unset(struct lpfc_hba *); -int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t); -int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *); -int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *); -uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); -int lpfc_sli4_post_async_mbox(struct lpfc_hba *); -int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba); -int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int); -struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); -struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *); -void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); -void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *); -int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *); -int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *); -int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *); -struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *); -void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *); -int lpfc_sli4_alloc_rpi(struct lpfc_hba *); -void lpfc_sli4_free_rpi(struct lpfc_hba *, int); -void lpfc_sli4_remove_rpis(struct lpfc_hba *); -void lpfc_sli4_async_event_proc(struct lpfc_hba *); -int lpfc_sli4_resume_rpi(struct lpfc_nodelist *); -void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); -void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); -void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *, - struct sli4_wcqe_xri_aborted *); -void lpfc_sli4_els_xri_aborted(struct lpfc_hba *, - struct sli4_wcqe_xri_aborted *); -int lpfc_sli4_brdreset(struct lpfc_hba *); -int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *); -void 
lpfc_sli_remove_dflt_fcf(struct lpfc_hba *); -int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *); -int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t); -uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool); -uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool); -void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t); -int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t); -void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *); -int lpfc_sli4_post_status_check(struct lpfc_hba *); -uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *); - diff --git a/trunk/drivers/scsi/lpfc/lpfc_version.h b/trunk/drivers/scsi/lpfc/lpfc_version.h index 6b8a148f0a55..e599519e3078 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_version.h +++ b/trunk/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.2" +#define LPFC_DRIVER_VERSION "8.3.1" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" diff --git a/trunk/drivers/scsi/lpfc/lpfc_vport.c b/trunk/drivers/scsi/lpfc/lpfc_vport.c index a6313ee84ac5..917ad56b0aff 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_vport.c +++ b/trunk/drivers/scsi/lpfc/lpfc_vport.c @@ -32,10 +32,8 @@ #include #include #include -#include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" -#include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc_scsi.h" @@ -91,8 +89,6 @@ lpfc_alloc_vpi(struct lpfc_hba *phba) vpi = 0; else set_bit(vpi, phba->vpi_bmask); - if (phba->sli_rev == LPFC_SLI_REV4) - phba->sli4_hba.max_cfg_param.vpi_used++; spin_unlock_irq(&phba->hbalock); return vpi; } @@ -100,12 +96,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba) static void lpfc_free_vpi(struct lpfc_hba *phba, int vpi) { - if (vpi == 0) - return; spin_lock_irq(&phba->hbalock); clear_bit(vpi, phba->vpi_bmask); - if (phba->sli_rev == LPFC_SLI_REV4) - phba->sli4_hba.max_cfg_param.vpi_used--; spin_unlock_irq(&phba->hbalock); } @@ -121,7 +113,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport) if (!pmb) { return -ENOMEM; } - mb = &pmb->u.mb; + mb = &pmb->mb; lpfc_read_sparam(phba, pmb, vport->vpi); /* @@ -251,22 +243,23 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport) (vport->fc_flag & wait_flags) || ((vport->port_state > LPFC_VPORT_FAILED) && (vport->port_state < LPFC_VPORT_READY))) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT, + lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, "1833 Vport discovery quiesce Wait:" - " state x%x fc_flags x%x" + " vpi x%x state x%x fc_flags x%x" " num_nodes x%x, waiting 1000 msecs" " total wait msecs x%x\n", - vport->port_state, vport->fc_flag, - vport->num_disc_nodes, + vport->vpi, vport->port_state, + vport->fc_flag, vport->num_disc_nodes, jiffies_to_msecs(jiffies - start_time)); msleep(1000); } else { /* Base case. Wait variants satisfied. 
Break out */ - lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT, + lpfc_printf_log(phba, KERN_INFO, LOG_VPORT, "1834 Vport discovery quiesced:" - " state x%x fc_flags x%x" + " vpi x%x state x%x fc_flags x%x" " wait msecs x%x\n", - vport->port_state, vport->fc_flag, + vport->vpi, vport->port_state, + vport->fc_flag, jiffies_to_msecs(jiffies - start_time)); break; @@ -274,10 +267,12 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport) } if (time_after(jiffies, wait_time_max)) - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, + lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, "1835 Vport discovery quiesce failed:" - " state x%x fc_flags x%x wait msecs x%x\n", - vport->port_state, vport->fc_flag, + " vpi x%x state x%x fc_flags x%x" + " wait msecs x%x\n", + vport->vpi, vport->port_state, + vport->fc_flag, jiffies_to_msecs(jiffies - start_time)); } @@ -313,21 +308,6 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable) goto error_out; } - /* - * In SLI4, the vpi must be activated before it can be used - * by the port. - */ - if (phba->sli_rev == LPFC_SLI_REV4) { - rc = lpfc_sli4_init_vpi(phba, vpi); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_VPORT, - "1838 Failed to INIT_VPI on vpi %d " - "status %d\n", vpi, rc); - rc = VPORT_NORESOURCES; - lpfc_free_vpi(phba, vpi); - goto error_out; - } - } /* Assign an unused board number */ if ((instance = lpfc_get_instance()) < 0) { @@ -555,16 +535,6 @@ lpfc_vport_delete(struct fc_vport *fc_vport) "physical host\n"); return VPORT_ERROR; } - - /* If the vport is a static vport fail the deletion. */ - if ((vport->vport_flag & STATIC_VPORT) && - !(phba->pport->load_flag & FC_UNLOADING)) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, - "1837 vport_delete failed: Cannot delete " - "static vport.\n"); - return VPORT_ERROR; - } - /* * If we are not unloading the driver then prevent the vport_delete * from happening until after this vport's discovery is finished. 
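The hunk that follows reverts lpfc_create_vport_work_array()/lpfc_destroy_vport_work_array() to size the snapshot by max_vpi. The underlying pattern: allocate a NULL-terminated pointer array, hold a host reference for each vport placed in it, and drop those references with scsi_host_put() on destroy. A userspace sketch with a toy refcount standing in for the scsi_host calls:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for lpfc_vport and scsi_host_get()/scsi_host_put(). */
struct vport { int id; int refs; };

static void host_get(struct vport *v) { v->refs++; }
static void host_put(struct vport *v) { v->refs--; }

/* Snapshot active vports into a NULL-terminated array of max + 1 slots. */
static struct vport **create_work_array(struct vport *all, int n, int max)
{
	struct vport **arr = calloc(max + 1, sizeof(*arr));
	int i, idx = 0;

	if (!arr)
		return NULL;
	for (i = 0; i < n && idx < max; i++) {
		host_get(&all[i]);	/* pin while it sits in the array */
		arr[idx++] = &all[i];
	}
	return arr;			/* calloc() left the tail NULL */
}

static void destroy_work_array(struct vport **arr, int max)
{
	int i;

	if (!arr)
		return;
	for (i = 0; arr[i] != NULL && i <= max; i++)
		host_put(arr[i]);	/* release each pinned vport */
	free(arr);
}

int main(void)
{
	struct vport v[2] = { { 1, 0 }, { 2, 0 } };
	struct vport **work = create_work_array(v, 2, 8);

	destroy_work_array(work, 8);
	printf("refs after destroy: %d %d\n", v[0].refs, v[1].refs); /* 0 0 */
	return 0;
}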
@@ -740,7 +710,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba) struct lpfc_vport *port_iterator; struct lpfc_vport **vports; int index = 0; - vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *), + vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *), GFP_KERNEL); if (vports == NULL) return NULL; @@ -764,7 +734,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports) int i; if (vports == NULL) return; - for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++) + for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++) scsi_host_put(lpfc_shost_from_vport(vports[i])); kfree(vports); } diff --git a/trunk/drivers/scsi/mpt2sas/mpt2sas_base.h b/trunk/drivers/scsi/mpt2sas/mpt2sas_base.h index 286c185fa9e4..36b1d1052ba1 100644 --- a/trunk/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/trunk/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -61,7 +61,6 @@ #include #include #include -#include #include "mpt2sas_debug.h" @@ -69,10 +68,10 @@ #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_AUTHOR "LSI Corporation " #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" -#define MPT2SAS_DRIVER_VERSION "01.100.03.00" +#define MPT2SAS_DRIVER_VERSION "01.100.02.00" #define MPT2SAS_MAJOR_VERSION 01 #define MPT2SAS_MINOR_VERSION 100 -#define MPT2SAS_BUILD_VERSION 03 +#define MPT2SAS_BUILD_VERSION 02 #define MPT2SAS_RELEASE_VERSION 00 /* diff --git a/trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.c index 14e473d1fa7b..ba6ab170bdf0 100644 --- a/trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/trunk/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -473,7 +473,7 @@ _ctl_poll(struct file *filep, poll_table *wait) } /** - * _ctl_set_task_mid - assign an active smid to tm request + * _ctl_do_task_abort - assign an active smid to the abort_task * @ioc: per adapter object * @karg - (struct mpt2_ioctl_command) * @tm_request - pointer to mf from user space @@ -482,7 +482,7 @@ _ctl_poll(struct file *filep, poll_table *wait) * during failure, the reply frame is filled. 
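Whatever the name (the hunk below renames _ctl_set_task_mid() back to _ctl_do_task_abort()), the core is a scan of ioc->scsi_lookup under scsi_lookup_lock for an active smid matching the task-management target, bailing out when nothing is in flight. The scan reduces to this shape — toy types, hypothetical field names:

#include <stdio.h>
#include <stdint.h>

/* Toy model of the ioc->scsi_lookup table scanned in the hunk below:
 * each in-flight command slot ("smid" = index + 1) records its target. */
struct tracker { int in_use; uint16_t handle; uint32_t lun; };

/* Return the first active smid for (handle, lun), or 0 if none.
 * The driver performs this scan under ioc->scsi_lookup_lock. */
static uint16_t find_active_smid(const struct tracker *t, int depth,
				 uint16_t handle, uint32_t lun)
{
	int i;

	for (i = 0; i < depth; i++)
		if (t[i].in_use && t[i].handle == handle && t[i].lun == lun)
			return (uint16_t)(i + 1);
	return 0;	/* no active mid: the TM request is pointless */
}

int main(void)
{
	struct tracker t[4] = { { 0 }, { 1, 0x12, 0 }, { 0 }, { 1, 0x12, 1 } };

	printf("smid=%u\n", find_active_smid(t, 4, 0x12, 1));	/* smid=4 */
	return 0;
}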
*/ static int -_ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, +_ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, Mpi2SCSITaskManagementRequest_t *tm_request) { u8 found = 0; @@ -494,14 +494,6 @@ _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, Mpi2SCSITaskManagementReply_t *tm_reply; u32 sz; u32 lun; - char *desc = NULL; - - if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) - desc = "abort_task"; - else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) - desc = "query_task"; - else - return 0; lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); @@ -525,13 +517,13 @@ _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); if (!found) { - dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " - "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name, - desc, tm_request->DevHandle, lun)); + dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: " + "DevHandle(0x%04x), lun(%d), no active mid!!\n", ioc->name, + tm_request->DevHandle, lun)); tm_reply = ioc->ctl_cmds.reply; tm_reply->DevHandle = tm_request->DevHandle; tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; - tm_reply->TaskType = tm_request->TaskType; + tm_reply->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4; tm_reply->VP_ID = tm_request->VP_ID; tm_reply->VF_ID = tm_request->VF_ID; @@ -543,9 +535,9 @@ _ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg, return 1; } - dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: " - "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name, - desc, tm_request->DevHandle, lun, tm_request->TaskMID)); + dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: " + "DevHandle(0x%04x), lun(%d), smid(%d)\n", ioc->name, + tm_request->DevHandle, lun, tm_request->TaskMID)); return 0; } @@ -747,10 +739,8 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, (Mpi2SCSITaskManagementRequest_t *)mpi_request; if (tm_request->TaskType == - MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || - tm_request->TaskType == - MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) { - if (_ctl_set_task_mid(ioc, &karg, tm_request)) { + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) { + if (_ctl_do_task_abort(ioc, &karg, tm_request)) { mpt2sas_base_free_smid(ioc, smid); goto out; } diff --git a/trunk/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/trunk/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 2a01a5f2a84d..e3a7967259e7 100644 --- a/trunk/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/trunk/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -197,12 +197,12 @@ static struct pci_device_id scsih_pci_table[] = { MODULE_DEVICE_TABLE(pci, scsih_pci_table); /** - * _scsih_set_debug_level - global setting of ioc->logging_level. + * scsih_set_debug_level - global setting of ioc->logging_level. * * Note: The logging levels are defined in mpt2sas_debug.h. 
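The logging_level hook that follows is the standard module_param_call() fan-out: parse the new value once, then push it to every active adapter. A userspace analogue, with a fixed two-adapter table standing in for the ioc list:

#include <stdio.h>
#include <stdlib.h>

/* Toy analogue of the module_param_call() hook below: one writable
 * parameter fans out to every registered adapter. */
struct adapter { const char *name; int logging_level; };

static struct adapter adapters[] = { { "ioc0", 0 }, { "ioc1", 0 } };
static int logging_level;

/* Mirrors the shape of the debug-level setter: parse, then fan out. */
static int set_debug_level(const char *val)
{
	int i;

	logging_level = (int)strtol(val, NULL, 0);	/* param_set_int() */
	for (i = 0; i < 2; i++) {
		adapters[i].logging_level = logging_level;
		printf("%s: logging_level(0x%08x)\n",
		       adapters[i].name, (unsigned)logging_level);
	}
	return 0;
}

int main(void)
{
	/* e.g. echo 0x310 > /sys/module/mpt2sas/parameters/logging_level */
	return set_debug_level("0x310");
}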
*/ static int -_scsih_set_debug_level(const char *val, struct kernel_param *kp) +scsih_set_debug_level(const char *val, struct kernel_param *kp) { int ret = param_set_int(val, kp); struct MPT2SAS_ADAPTER *ioc; @@ -215,7 +215,7 @@ _scsih_set_debug_level(const char *val, struct kernel_param *kp) ioc->logging_level = logging_level; return 0; } -module_param_call(logging_level, _scsih_set_debug_level, param_get_int, +module_param_call(logging_level, scsih_set_debug_level, param_get_int, &logging_level, 0644); /** @@ -883,41 +883,6 @@ _scsih_scsi_lookup_find_by_target(struct MPT2SAS_ADAPTER *ioc, int id, return found; } -/** - * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun - * @ioc: per adapter object - * @id: target id - * @lun: lun number - * @channel: channel - * Context: This function will acquire ioc->scsi_lookup_lock. - * - * This will search for a matching channel:id:lun in the scsi_lookup array, - * returning 1 if found. - */ -static u8 -_scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id, - unsigned int lun, int channel) -{ - u8 found; - unsigned long flags; - int i; - - spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); - found = 0; - for (i = 0 ; i < ioc->request_depth; i++) { - if (ioc->scsi_lookup[i].scmd && - (ioc->scsi_lookup[i].scmd->device->id == id && - ioc->scsi_lookup[i].scmd->device->channel == channel && - ioc->scsi_lookup[i].scmd->device->lun == lun)) { - found = 1; - goto out; - } - } - out: - spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); - return found; -} - /** * _scsih_get_chain_buffer_dma - obtain block of chains (dma address) * @ioc: per adapter object @@ -1082,14 +1047,14 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc, } /** - * _scsih_change_queue_depth - setting device queue depth + * scsih_change_queue_depth - setting device queue depth * @sdev: scsi device struct * @qdepth: requested queue depth * * Returns queue depth. */ static int -_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) +scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) { struct Scsi_Host *shost = sdev->host; int max_depth; @@ -1114,14 +1079,14 @@ _scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) } /** - * _scsih_change_queue_depth - changing device queue tag type + * scsih_change_queue_depth - changing device queue tag type * @sdev: scsi device struct * @tag_type: requested tag type * * Returns queue tag type. */ static int -_scsih_change_queue_type(struct scsi_device *sdev, int tag_type) +scsih_change_queue_type(struct scsi_device *sdev, int tag_type) { if (sdev->tagged_supported) { scsi_set_tag_type(sdev, tag_type); @@ -1136,14 +1101,14 @@ _scsih_change_queue_type(struct scsi_device *sdev, int tag_type) } /** - * _scsih_target_alloc - target add routine + * scsih_target_alloc - target add routine * @starget: scsi target struct * * Returns 0 if ok. Any other return is assumed to be an error and * the device is ignored. */ static int -_scsih_target_alloc(struct scsi_target *starget) +scsih_target_alloc(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(&starget->dev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -1198,13 +1163,13 @@ _scsih_target_alloc(struct scsi_target *starget) } /** - * _scsih_target_destroy - target destroy routine + * scsih_target_destroy - target destroy routine * @starget: scsi target struct * * Returns nothing. 
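scsih_change_queue_depth() earlier in this hunk returns the depth it actually programmed. The hunk does not show the whole body, so this is a hedged sketch of the usual shape — clamp the request to a host ceiling and force untagged devices to a depth of 1; MAX_DEPTH is a stand-in for whatever limit the adapter computes:

#include <stdio.h>

#define MAX_DEPTH 32	/* assumed per-host ceiling, illustrative only */

static int change_queue_depth(int tagged_supported, int qdepth)
{
	int max_depth = MAX_DEPTH;

	if (!tagged_supported)
		max_depth = 1;		/* untagged: one command at a time */
	if (qdepth > max_depth)
		qdepth = max_depth;
	return qdepth;			/* depth actually programmed */
}

int main(void)
{
	printf("%d\n", change_queue_depth(1, 128));	/* 32 */
	printf("%d\n", change_queue_depth(0, 128));	/* 1 */
	return 0;
}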
*/ static void -_scsih_target_destroy(struct scsi_target *starget) +scsih_target_destroy(struct scsi_target *starget) { struct Scsi_Host *shost = dev_to_shost(&starget->dev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -1247,14 +1212,14 @@ _scsih_target_destroy(struct scsi_target *starget) } /** - * _scsih_slave_alloc - device add routine + * scsih_slave_alloc - device add routine * @sdev: scsi device struct * * Returns 0 if ok. Any other return is assumed to be an error and * the device is ignored. */ static int -_scsih_slave_alloc(struct scsi_device *sdev) +scsih_slave_alloc(struct scsi_device *sdev) { struct Scsi_Host *shost; struct MPT2SAS_ADAPTER *ioc; @@ -1308,13 +1273,13 @@ _scsih_slave_alloc(struct scsi_device *sdev) } /** - * _scsih_slave_destroy - device destroy routine + * scsih_slave_destroy - device destroy routine * @sdev: scsi device struct * * Returns nothing. */ static void -_scsih_slave_destroy(struct scsi_device *sdev) +scsih_slave_destroy(struct scsi_device *sdev) { struct MPT2SAS_TARGET *sas_target_priv_data; struct scsi_target *starget; @@ -1330,13 +1295,13 @@ _scsih_slave_destroy(struct scsi_device *sdev) } /** - * _scsih_display_sata_capabilities - sata capabilities + * scsih_display_sata_capabilities - sata capabilities * @ioc: per adapter object * @sas_device: the sas_device object * @sdev: scsi device struct */ static void -_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc, +scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc, struct _sas_device *sas_device, struct scsi_device *sdev) { Mpi2ConfigReply_t mpi_reply; @@ -1436,14 +1401,14 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc, } /** - * _scsih_slave_configure - device configure routine. + * scsih_slave_configure - device configure routine. * @sdev: scsi device struct * * Returns 0 if ok. Any other return is assumed to be an error and * the device is ignored. */ static int -_scsih_slave_configure(struct scsi_device *sdev) +scsih_slave_configure(struct scsi_device *sdev) { struct Scsi_Host *shost = sdev->host; struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -1524,7 +1489,7 @@ _scsih_slave_configure(struct scsi_device *sdev) r_level, raid_device->handle, (unsigned long long)raid_device->wwid, raid_device->num_pds, ds); - _scsih_change_queue_depth(sdev, qdepth); + scsih_change_queue_depth(sdev, qdepth); return 0; } @@ -1567,10 +1532,10 @@ _scsih_slave_configure(struct scsi_device *sdev) sas_device->slot); if (!ssp_target) - _scsih_display_sata_capabilities(ioc, sas_device, sdev); + scsih_display_sata_capabilities(ioc, sas_device, sdev); } - _scsih_change_queue_depth(sdev, qdepth); + scsih_change_queue_depth(sdev, qdepth); if (ssp_target) sas_read_port_mode_page(sdev); @@ -1578,7 +1543,7 @@ _scsih_slave_configure(struct scsi_device *sdev) } /** - * _scsih_bios_param - fetch head, sector, cylinder info for a disk + * scsih_bios_param - fetch head, sector, cylinder info for a disk * @sdev: scsi device struct * @bdev: pointer to block device context * @capacity: device size (in 512 byte sectors) @@ -1590,7 +1555,7 @@ _scsih_slave_configure(struct scsi_device *sdev) * Return nothing. 
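scsih_bios_param() below reports a synthetic disk geometry; the hunk shows only the signature, so this is a hedged sketch of the conventional arithmetic: try 64 heads / 32 sectors, and switch to the 255/63 large-disk geometry once that yields more than 1024 cylinders:

#include <stdio.h>
#include <stdint.h>

/* capacity is in 512-byte sectors; params returns {heads, sectors, cyls}. */
static void bios_param(uint64_t capacity, int params[3])
{
	int heads = 64, sectors = 32;
	uint64_t cylinders = capacity / (uint64_t)(heads * sectors);

	if (cylinders > 1024) {		/* "dummy" large-disk geometry */
		heads = 255;
		sectors = 63;
		cylinders = capacity / (uint64_t)(heads * sectors);
	}
	params[0] = heads;
	params[1] = sectors;
	params[2] = (int)cylinders;
}

int main(void)
{
	int p[3];

	bios_param(4096ULL * 2048 * 1024, p);	/* a 4 TiB-ish disk */
	printf("H=%d S=%d C=%d\n", p[0], p[1], p[2]);
	return 0;
}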
*/ static int -_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, +scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int params[]) { int heads; @@ -1671,7 +1636,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code) } /** - * _scsih_tm_done - tm completion routine + * scsih_tm_done - tm completion routine * @ioc: per adapter object * @smid: system request message index * @VF_ID: virtual function id @@ -1683,7 +1648,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code) * Return nothing. */ static void -_scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) +scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) { MPI2DefaultReply_t *mpi_reply; @@ -1858,13 +1823,13 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun, } /** - * _scsih_abort - eh threads main abort routine + * scsih_abort - eh threads main abort routine * @sdev: scsi device struct * * Returns SUCCESS if command aborted else FAILED */ static int -_scsih_abort(struct scsi_cmnd *scmd) +scsih_abort(struct scsi_cmnd *scmd) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT2SAS_DEVICE *sas_device_priv_data; @@ -1924,86 +1889,15 @@ _scsih_abort(struct scsi_cmnd *scmd) return r; } -/** - * _scsih_dev_reset - eh threads main device reset routine - * @sdev: scsi device struct - * - * Returns SUCCESS if command aborted else FAILED - */ -static int -_scsih_dev_reset(struct scsi_cmnd *scmd) -{ - struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); - struct MPT2SAS_DEVICE *sas_device_priv_data; - struct _sas_device *sas_device; - unsigned long flags; - u16 handle; - int r; - - printk(MPT2SAS_INFO_FMT "attempting device reset! scmd(%p)\n", - ioc->name, scmd); - scsi_print_command(scmd); - - sas_device_priv_data = scmd->device->hostdata; - if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { - printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n", - ioc->name, scmd); - scmd->result = DID_NO_CONNECT << 16; - scmd->scsi_done(scmd); - r = SUCCESS; - goto out; - } - - /* for hidden raid components obtain the volume_handle */ - handle = 0; - if (sas_device_priv_data->sas_target->flags & - MPT_TARGET_FLAGS_RAID_COMPONENT) { - spin_lock_irqsave(&ioc->sas_device_lock, flags); - sas_device = _scsih_sas_device_find_by_handle(ioc, - sas_device_priv_data->sas_target->handle); - if (sas_device) - handle = sas_device->volume_handle; - spin_unlock_irqrestore(&ioc->sas_device_lock, flags); - } else - handle = sas_device_priv_data->sas_target->handle; - - if (!handle) { - scmd->result = DID_RESET << 16; - r = FAILED; - goto out; - } - - mutex_lock(&ioc->tm_cmds.mutex); - mpt2sas_scsih_issue_tm(ioc, handle, 0, - MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, scmd->device->lun, - 30); - - /* - * sanity check see whether all commands to this device been - * completed - */ - if (_scsih_scsi_lookup_find_by_lun(ioc, scmd->device->id, - scmd->device->lun, scmd->device->channel)) - r = FAILED; - else - r = SUCCESS; - ioc->tm_cmds.status = MPT2_CMD_NOT_USED; - mutex_unlock(&ioc->tm_cmds.mutex); - - out: - printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n", - ioc->name, ((r == SUCCESS) ? 
"SUCCESS" : "FAILED"), scmd); - return r; -} /** - * _scsih_target_reset - eh threads main target reset routine + * scsih_dev_reset - eh threads main device reset routine * @sdev: scsi device struct * * Returns SUCCESS if command aborted else FAILED */ static int -_scsih_target_reset(struct scsi_cmnd *scmd) +scsih_dev_reset(struct scsi_cmnd *scmd) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT2SAS_DEVICE *sas_device_priv_data; @@ -2018,7 +1912,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd) sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { - printk(MPT2SAS_INFO_FMT "target been deleted! scmd(%p)\n", + printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n", ioc->name, scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); @@ -2068,13 +1962,13 @@ _scsih_target_reset(struct scsi_cmnd *scmd) } /** - * _scsih_abort - eh threads main host reset routine + * scsih_abort - eh threads main host reset routine * @sdev: scsi device struct * * Returns SUCCESS if command aborted else FAILED */ static int -_scsih_host_reset(struct scsi_cmnd *scmd) +scsih_host_reset(struct scsi_cmnd *scmd) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); int r, retval; @@ -2496,107 +2390,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase) } /** - * _scsih_setup_eedp - setup MPI request for EEDP transfer - * @scmd: pointer to scsi command object - * @mpi_request: pointer to the SCSI_IO reqest message frame - * - * Supporting protection 1 and 3. - * - * Returns nothing - */ -static void -_scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request) -{ - u16 eedp_flags; - unsigned char prot_op = scsi_get_prot_op(scmd); - unsigned char prot_type = scsi_get_prot_type(scmd); - - if (prot_type == SCSI_PROT_DIF_TYPE0 || - prot_type == SCSI_PROT_DIF_TYPE2 || - prot_op == SCSI_PROT_NORMAL) - return; - - if (prot_op == SCSI_PROT_READ_STRIP) - eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP; - else if (prot_op == SCSI_PROT_WRITE_INSERT) - eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; - else - return; - - mpi_request->EEDPBlockSize = scmd->device->sector_size; - - switch (prot_type) { - case SCSI_PROT_DIF_TYPE1: - - /* - * enable ref/guard checking - * auto increment ref tag - */ - mpi_request->EEDPFlags = eedp_flags | - MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | - MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | - MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; - mpi_request->CDB.EEDP32.PrimaryReferenceTag = - cpu_to_be32(scsi_get_lba(scmd)); - - break; - - case SCSI_PROT_DIF_TYPE3: - - /* - * enable guard checking - */ - mpi_request->EEDPFlags = eedp_flags | - MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; - - break; - } -} - -/** - * _scsih_eedp_error_handling - return sense code for EEDP errors - * @scmd: pointer to scsi command object - * @ioc_status: ioc status - * - * Returns nothing - */ -static void -_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) -{ - u8 ascq; - u8 sk; - u8 host_byte; - - switch (ioc_status) { - case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: - ascq = 0x01; - break; - case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: - ascq = 0x02; - break; - case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: - ascq = 0x03; - break; - default: - ascq = 0x00; - break; - } - - if (scmd->sc_data_direction == DMA_TO_DEVICE) { - sk = ILLEGAL_REQUEST; - host_byte = DID_ABORT; - } else { - sk = ABORTED_COMMAND; - host_byte = DID_OK; - } - - scsi_build_sense_buffer(0, scmd->sense_buffer, sk, 0x10, ascq); - scmd->result = 
DRIVER_SENSE << 24 | (host_byte << 16) | - SAM_STAT_CHECK_CONDITION; -} - -/** - * _scsih_qcmd - main scsi request entry point + * scsih_qcmd - main scsi request entry point * @scmd: pointer to scsi command object * @done: function pointer to be invoked on completion * @@ -2607,7 +2401,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full */ static int -_scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) +scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT2SAS_DEVICE *sas_device_priv_data; @@ -2676,7 +2470,6 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) } mpi_request = mpt2sas_base_get_msg_frame(ioc, smid); memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t)); - _scsih_setup_eedp(scmd, mpi_request); mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; if (sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) @@ -2811,15 +2604,6 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: desc_ioc_state = "scsi ext terminated"; break; - case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: - desc_ioc_state = "eedp guard error"; - break; - case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: - desc_ioc_state = "eedp ref tag error"; - break; - case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: - desc_ioc_state = "eedp app tag error"; - break; default: desc_ioc_state = "unknown"; break; @@ -2999,7 +2783,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle) } /** - * _scsih_io_done - scsi request callback + * scsih_io_done - scsi request callback * @ioc: per adapter object * @smid: system request message index * @VF_ID: virtual function id @@ -3010,7 +2794,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle) * Return nothing. 
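The EEDP support deleted above paired two decisions: the protection operation selects insert versus check-and-strip, and the DIF type selects which tags get verified (type 1 checks ref tag and guard, with the ref tag seeded from the LBA; type 3 checks the guard only). A compact recap with illustrative flag values — these are not the MPI2 encodings:

#include <stdio.h>
#include <stdint.h>

enum { OP_NORMAL, OP_READ_STRIP, OP_WRITE_INSERT };
enum { DIF_TYPE0, DIF_TYPE1, DIF_TYPE3 };

#define F_CHECK_REMOVE	0x01
#define F_INSERT	0x02
#define F_CHECK_REFTAG	0x04
#define F_CHECK_GUARD	0x08

static uint32_t eedp_flags(int prot_op, int prot_type)
{
	uint32_t flags;

	if (prot_op == OP_READ_STRIP)
		flags = F_CHECK_REMOVE;		/* verify and strip on read */
	else if (prot_op == OP_WRITE_INSERT)
		flags = F_INSERT;		/* generate PI on write */
	else
		return 0;			/* nothing to do */

	if (prot_type == DIF_TYPE1)
		flags |= F_CHECK_REFTAG | F_CHECK_GUARD; /* ref tag = LBA */
	else if (prot_type == DIF_TYPE3)
		flags |= F_CHECK_GUARD;		/* type 3: guard only */
	return flags;
}

int main(void)
{
	printf("0x%x\n", eedp_flags(OP_WRITE_INSERT, DIF_TYPE1));	/* 0xe */
	return 0;
}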
*/ static void -_scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) +scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) { Mpi2SCSIIORequest_t *mpi_request; Mpi2SCSIIOReply_t *mpi_reply; @@ -3155,11 +2939,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply) scmd->result = DID_RESET << 16; break; - case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: - case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: - case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: - _scsih_eedp_error_handling(scmd, ioc_status); - break; case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: case MPI2_IOCSTATUS_INVALID_FUNCTION: case MPI2_IOCSTATUS_INVALID_SGL: @@ -5351,19 +5130,18 @@ static struct scsi_host_template scsih_driver_template = { .module = THIS_MODULE, .name = "Fusion MPT SAS Host", .proc_name = MPT2SAS_DRIVER_NAME, - .queuecommand = _scsih_qcmd, - .target_alloc = _scsih_target_alloc, - .slave_alloc = _scsih_slave_alloc, - .slave_configure = _scsih_slave_configure, - .target_destroy = _scsih_target_destroy, - .slave_destroy = _scsih_slave_destroy, - .change_queue_depth = _scsih_change_queue_depth, - .change_queue_type = _scsih_change_queue_type, - .eh_abort_handler = _scsih_abort, - .eh_device_reset_handler = _scsih_dev_reset, - .eh_target_reset_handler = _scsih_target_reset, - .eh_host_reset_handler = _scsih_host_reset, - .bios_param = _scsih_bios_param, + .queuecommand = scsih_qcmd, + .target_alloc = scsih_target_alloc, + .slave_alloc = scsih_slave_alloc, + .slave_configure = scsih_slave_configure, + .target_destroy = scsih_target_destroy, + .slave_destroy = scsih_slave_destroy, + .change_queue_depth = scsih_change_queue_depth, + .change_queue_type = scsih_change_queue_type, + .eh_abort_handler = scsih_abort, + .eh_device_reset_handler = scsih_dev_reset, + .eh_host_reset_handler = scsih_host_reset, + .bios_param = scsih_bios_param, .can_queue = 1, .this_id = -1, .sg_tablesize = MPT2SAS_SG_DEPTH, @@ -5450,13 +5228,13 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc, } /** - * _scsih_remove - detach and remove add host + * scsih_remove - detach and remove add host * @pdev: PCI device struct * * Return nothing. */ static void __devexit -_scsih_remove(struct pci_dev *pdev) +scsih_remove(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -5664,14 +5442,14 @@ _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc) } /** - * _scsih_probe - attach and add scsi host + * scsih_probe - attach and add scsi host * @pdev: PCI device struct * @id: pci device id * * Returns 0 success, anything else error. */ static int -_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) +scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct MPT2SAS_ADAPTER *ioc; struct Scsi_Host *shost; @@ -5725,9 +5503,6 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto out_add_shost_fail; } - scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION - | SHOST_DIF_TYPE3_PROTECTION); - /* event thread */ snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), "fw_event%d", ioc->id); @@ -5761,14 +5536,14 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) #ifdef CONFIG_PM /** - * _scsih_suspend - power management suspend main entry point + * scsih_suspend - power management suspend main entry point * @pdev: PCI device struct * @state: PM state change to (usually PCI_D3) * * Returns 0 success, anything else error. 
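The suspend/resume entry points below are wired into the pci_driver table only under CONFIG_PM, so the core must tolerate absent hooks. A toy table showing that wiring; all names here are stand-ins, not the driver's structures:

#include <stdio.h>

/* Shape of the hook table assembled below: probe/remove always present,
 * suspend/resume optional (NULL when power management is compiled out). */
struct toy_pci_driver {
	const char *name;
	int  (*probe)(void);
	void (*remove)(void);
	int  (*suspend)(void);
	int  (*resume)(void);
};

static int  toy_probe(void)   { printf("probe\n");   return 0; }
static void toy_remove(void)  { printf("remove\n"); }
static int  toy_suspend(void) { printf("suspend\n"); return 0; }
static int  toy_resume(void)  { printf("resume\n");  return 0; }

static struct toy_pci_driver drv = {
	.name    = "mpt2sas",
	.probe   = toy_probe,
	.remove  = toy_remove,
	.suspend = toy_suspend,
	.resume  = toy_resume,
};

int main(void)
{
	drv.probe();
	if (drv.suspend)	/* callers check for NULL optional hooks */
		drv.suspend();
	if (drv.resume)
		drv.resume();
	drv.remove();
	return 0;
}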
*/ static int -_scsih_suspend(struct pci_dev *pdev, pm_message_t state) +scsih_suspend(struct pci_dev *pdev, pm_message_t state) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -5789,13 +5564,13 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state) } /** - * _scsih_resume - power management resume main entry point + * scsih_resume - power management resume main entry point * @pdev: PCI device struct * * Returns 0 success, anything else error. */ static int -_scsih_resume(struct pci_dev *pdev) +scsih_resume(struct pci_dev *pdev) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -5824,22 +5599,22 @@ _scsih_resume(struct pci_dev *pdev) static struct pci_driver scsih_driver = { .name = MPT2SAS_DRIVER_NAME, .id_table = scsih_pci_table, - .probe = _scsih_probe, - .remove = __devexit_p(_scsih_remove), + .probe = scsih_probe, + .remove = __devexit_p(scsih_remove), #ifdef CONFIG_PM - .suspend = _scsih_suspend, - .resume = _scsih_resume, + .suspend = scsih_suspend, + .resume = scsih_resume, #endif }; /** - * _scsih_init - main entry point for this driver. + * scsih_init - main entry point for this driver. * * Returns 0 success, anything else error. */ static int __init -_scsih_init(void) +scsih_init(void) { int error; @@ -5855,10 +5630,10 @@ _scsih_init(void) mpt2sas_base_initialize_callback_handler(); /* queuecommand callback hander */ - scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done); + scsi_io_cb_idx = mpt2sas_base_register_callback_handler(scsih_io_done); /* task managment callback handler */ - tm_cb_idx = mpt2sas_base_register_callback_handler(_scsih_tm_done); + tm_cb_idx = mpt2sas_base_register_callback_handler(scsih_tm_done); /* base internal commands callback handler */ base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done); @@ -5884,12 +5659,12 @@ _scsih_init(void) } /** - * _scsih_exit - exit point for this driver (when it is a module). + * scsih_exit - exit point for this driver (when it is a module). * * Returns 0 success, anything else error. */ static void __exit -_scsih_exit(void) +scsih_exit(void) { printk(KERN_INFO "mpt2sas version %s unloading\n", MPT2SAS_DRIVER_VERSION); @@ -5907,5 +5682,5 @@ _scsih_exit(void) mpt2sas_ctl_exit(); } -module_init(_scsih_init); -module_exit(_scsih_exit); +module_init(scsih_init); +module_exit(scsih_exit); diff --git a/trunk/drivers/scsi/mpt2sas/mpt2sas_transport.c b/trunk/drivers/scsi/mpt2sas/mpt2sas_transport.c index 686695b155c7..5c65da519e39 100644 --- a/trunk/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/trunk/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -264,7 +264,7 @@ struct rep_manu_reply{ }; /** - * _transport_expander_report_manufacture - obtain SMP report_manufacture + * transport_expander_report_manufacture - obtain SMP report_manufacture * @ioc: per adapter object * @sas_address: expander sas address * @edev: the sas_expander_device object @@ -274,7 +274,7 @@ struct rep_manu_reply{ * Returns 0 for success, non-zero for failure. 
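The init path above registers each completion routine with mpt2sas_base_register_callback_handler() and keeps the returned index; replies are then dispatched through that small index rather than carrying a function pointer per request. A minimal registry in the same spirit — the table size and the index-0-is-invalid convention are assumptions:

#include <stdio.h>
#include <stdint.h>

typedef void (*reply_cb)(uint16_t smid);

#define MAX_CB 8
static reply_cb cb_table[MAX_CB];

/* Hand out the first free slot; 0 is reserved as "invalid". */
static uint8_t register_callback_handler(reply_cb cb)
{
	uint8_t i;

	for (i = 1; i < MAX_CB; i++)
		if (!cb_table[i]) {
			cb_table[i] = cb;
			return i;
		}
	return 0;
}

static void io_done(uint16_t smid) { printf("io done, smid %u\n", smid); }
static void tm_done(uint16_t smid) { printf("tm done, smid %u\n", smid); }

int main(void)
{
	uint8_t scsi_io_cb_idx = register_callback_handler(io_done);
	uint8_t tm_cb_idx = register_callback_handler(tm_done);

	cb_table[scsi_io_cb_idx](42);	/* interrupt-path dispatch */
	cb_table[tm_cb_idx](7);
	return 0;
}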
*/ static int -_transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, +transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc, u64 sas_address, struct sas_expander_device *edev) { Mpi2SmpPassthroughRequest_t *mpi_request; @@ -578,7 +578,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle, MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER || mpt2sas_port->remote_identify.device_type == MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) - _transport_expander_report_manufacture(ioc, + transport_expander_report_manufacture(ioc, mpt2sas_port->remote_identify.sas_address, rphy_to_expander_device(rphy)); @@ -852,7 +852,7 @@ rphy_to_ioc(struct sas_rphy *rphy) } /** - * _transport_get_linkerrors - + * transport_get_linkerrors - * @phy: The sas phy object * * Only support sas_host direct attached phys. @@ -860,7 +860,7 @@ rphy_to_ioc(struct sas_rphy *rphy) * */ static int -_transport_get_linkerrors(struct sas_phy *phy) +transport_get_linkerrors(struct sas_phy *phy) { struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); struct _sas_phy *mpt2sas_phy; @@ -903,14 +903,14 @@ _transport_get_linkerrors(struct sas_phy *phy) } /** - * _transport_get_enclosure_identifier - + * transport_get_enclosure_identifier - * @phy: The sas phy object * * Obtain the enclosure logical id for an expander. * Returns 0 for success, non-zero for failure. */ static int -_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) +transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) { struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); struct _sas_node *sas_expander; @@ -929,13 +929,13 @@ _transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) } /** - * _transport_get_bay_identifier - + * transport_get_bay_identifier - * @phy: The sas phy object * * Returns the slot id for a device that resides inside an enclosure. */ static int -_transport_get_bay_identifier(struct sas_rphy *rphy) +transport_get_bay_identifier(struct sas_rphy *rphy) { struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy); struct _sas_device *sas_device; @@ -953,7 +953,7 @@ _transport_get_bay_identifier(struct sas_rphy *rphy) } /** - * _transport_phy_reset - + * transport_phy_reset - * @phy: The sas phy object * @hard_reset: * @@ -961,7 +961,7 @@ _transport_get_bay_identifier(struct sas_rphy *rphy) * Returns 0 for success, non-zero for failure. 
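The transport hooks above recover the adapter with phy_to_ioc()/rphy_to_ioc(). The usual kernel mechanism for this kind of back-pointer is container_of() — subtract the embedded member's offset to reach the enclosing structure — though the mpt2sas helpers may equally go through shost_priv(); this is an illustration of the container_of() form, not the driver's code:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sas_phy { int number; };

struct adapter {
	const char *name;
	struct sas_phy phy;	/* embedded transport object */
};

static struct adapter *phy_to_adapter(struct sas_phy *phy)
{
	return container_of(phy, struct adapter, phy);
}

int main(void)
{
	struct adapter ioc = { .name = "ioc0", .phy = { .number = 3 } };

	printf("%s owns phy %d\n",
	       phy_to_adapter(&ioc.phy)->name, ioc.phy.number);
	return 0;
}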
*/ static int -_transport_phy_reset(struct sas_phy *phy, int hard_reset) +transport_phy_reset(struct sas_phy *phy, int hard_reset) { struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy); struct _sas_phy *mpt2sas_phy; @@ -1002,7 +1002,7 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset) } /** - * _transport_smp_handler - transport portal for smp passthru + * transport_smp_handler - transport portal for smp passthru * @shost: shost object * @rphy: sas transport rphy object * @req: @@ -1012,7 +1012,7 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset) * smp_rep_general /sys/class/bsg/expander-5:0 */ static int -_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, +transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, struct request *req) { struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); @@ -1200,11 +1200,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, } struct sas_function_template mpt2sas_transport_functions = { - .get_linkerrors = _transport_get_linkerrors, - .get_enclosure_identifier = _transport_get_enclosure_identifier, - .get_bay_identifier = _transport_get_bay_identifier, - .phy_reset = _transport_phy_reset, - .smp_handler = _transport_smp_handler, + .get_linkerrors = transport_get_linkerrors, + .get_enclosure_identifier = transport_get_enclosure_identifier, + .get_bay_identifier = transport_get_bay_identifier, + .phy_reset = transport_phy_reset, + .smp_handler = transport_smp_handler, }; struct scsi_transport_template *mpt2sas_transport_template; diff --git a/trunk/drivers/scsi/mvsas.c b/trunk/drivers/scsi/mvsas.c new file mode 100644 index 000000000000..e4acebd10d1b --- /dev/null +++ b/trunk/drivers/scsi/mvsas.c @@ -0,0 +1,3222 @@ +/* + mvsas.c - Marvell 88SE6440 SAS/SATA support + + Copyright 2007 Red Hat, Inc. + Copyright 2008 Marvell. + + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License as + published by the Free Software Foundation; either version 2, + or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty + of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the GNU General Public License for more details. + + You should have received a copy of the GNU General Public + License along with this program; see the file COPYING. If not, + write to the Free Software Foundation, 675 Mass Ave, Cambridge, + MA 02139, USA. + + --------------------------------------------------------------- + + Random notes: + * hardware supports controlling the endian-ness of data + structures. this permits elimination of all the le32_to_cpu() + and cpu_to_le32() conversions. 
+ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "mvsas" +#define DRV_VERSION "0.5.2" +#define _MV_DUMP 0 +#define MVS_DISABLE_NVRAM +#define MVS_DISABLE_MSI + +#define mr32(reg) readl(regs + MVS_##reg) +#define mw32(reg,val) writel((val), regs + MVS_##reg) +#define mw32_f(reg,val) do { \ + writel((val), regs + MVS_##reg); \ + readl(regs + MVS_##reg); \ + } while (0) + +#define MVS_ID_NOT_MAPPED 0x7f +#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) + +/* offset for D2H FIS in the Received FIS List Structure */ +#define SATA_RECEIVED_D2H_FIS(reg_set) \ + ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40) +#define SATA_RECEIVED_PIO_FIS(reg_set) \ + ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20) +#define UNASSOC_D2H_FIS(id) \ + ((void *) mvi->rx_fis + 0x100 * id) + +#define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \ + for ((__mc) = (__lseq_mask), (__lseq) = 0; \ + (__mc) != 0 && __rest; \ + (++__lseq), (__mc) >>= 1) + +/* driver compile-time configuration */ +enum driver_configuration { + MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ + MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ + /* software requires power-of-2 + ring size */ + + MVS_SLOTS = 512, /* command slots */ + MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */ + MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ + MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ + MVS_OAF_SZ = 64, /* Open address frame buffer size */ + + MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */ + + MVS_QUEUE_SIZE = 30, /* Support Queue depth */ + MVS_CAN_QUEUE = MVS_SLOTS - 1, /* SCSI Queue depth */ +}; + +/* unchangeable hardware details */ +enum hardware_details { + MVS_MAX_PHYS = 8, /* max. possible phys */ + MVS_MAX_PORTS = 8, /* max. 
possible ports */ + MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100), +}; + +/* peripheral registers (BAR2) */ +enum peripheral_registers { + SPI_CTL = 0x10, /* EEPROM control */ + SPI_CMD = 0x14, /* EEPROM command */ + SPI_DATA = 0x18, /* EEPROM data */ +}; + +enum peripheral_register_bits { + TWSI_RDY = (1U << 7), /* EEPROM interface ready */ + TWSI_RD = (1U << 4), /* EEPROM read access */ + + SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */ +}; + +/* enhanced mode registers (BAR4) */ +enum hw_registers { + MVS_GBL_CTL = 0x04, /* global control */ + MVS_GBL_INT_STAT = 0x08, /* global irq status */ + MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ + MVS_GBL_PORT_TYPE = 0xa0, /* port type */ + + MVS_CTL = 0x100, /* SAS/SATA port configuration */ + MVS_PCS = 0x104, /* SAS/SATA port control/status */ + MVS_CMD_LIST_LO = 0x108, /* cmd list addr */ + MVS_CMD_LIST_HI = 0x10C, + MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */ + MVS_RX_FIS_HI = 0x114, + + MVS_TX_CFG = 0x120, /* TX configuration */ + MVS_TX_LO = 0x124, /* TX (delivery) ring addr */ + MVS_TX_HI = 0x128, + + MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */ + MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */ + MVS_RX_CFG = 0x134, /* RX configuration */ + MVS_RX_LO = 0x138, /* RX (completion) ring addr */ + MVS_RX_HI = 0x13C, + MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */ + + MVS_INT_COAL = 0x148, /* Int coalescing config */ + MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ + MVS_INT_STAT = 0x150, /* Central int status */ + MVS_INT_MASK = 0x154, /* Central int enable */ + MVS_INT_STAT_SRS = 0x158, /* SATA register set status */ + MVS_INT_MASK_SRS = 0x15C, + + /* ports 1-3 follow after this */ + MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */ + MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */ + MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */ + MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */ + + /* ports 1-3 follow after this */ + MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */ + MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */ + + MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */ + MVS_CMD_DATA = 0x1BC, /* Command register port (data) */ + + /* ports 1-3 follow after this */ + MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */ + MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */ + MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */ + MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */ + + /* ports 1-3 follow after this */ + MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */ + MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */ + MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */ + MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */ +}; + +enum hw_register_bits { + /* MVS_GBL_CTL */ + INT_EN = (1U << 1), /* Global int enable */ + HBA_RST = (1U << 0), /* HBA reset */ + + /* MVS_GBL_INT_STAT */ + INT_XOR = (1U << 4), /* XOR engine event */ + INT_SAS_SATA = (1U << 0), /* SAS/SATA event */ + + /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */ + SATA_TARGET = (1U << 16), /* port0 SATA target enable */ + MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */ + MODE_AUTO_DET_PORT6 = (1U << 14), + MODE_AUTO_DET_PORT5 = (1U << 13), + MODE_AUTO_DET_PORT4 = (1U << 12), + MODE_AUTO_DET_PORT3 = (1U << 11), + MODE_AUTO_DET_PORT2 = (1U << 10), + MODE_AUTO_DET_PORT1 = (1U << 9), + MODE_AUTO_DET_PORT0 = (1U << 8), + MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 | + MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 | + MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 
| + MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7, + MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */ + MODE_SAS_PORT6_MASK = (1U << 6), + MODE_SAS_PORT5_MASK = (1U << 5), + MODE_SAS_PORT4_MASK = (1U << 4), + MODE_SAS_PORT3_MASK = (1U << 3), + MODE_SAS_PORT2_MASK = (1U << 2), + MODE_SAS_PORT1_MASK = (1U << 1), + MODE_SAS_PORT0_MASK = (1U << 0), + MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK | + MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK | + MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK | + MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK, + + /* SAS_MODE value may be + * dictated (in hw) by values + * of SATA_TARGET & AUTO_DET + */ + + /* MVS_TX_CFG */ + TX_EN = (1U << 16), /* Enable TX */ + TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */ + + /* MVS_RX_CFG */ + RX_EN = (1U << 16), /* Enable RX */ + RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */ + + /* MVS_INT_COAL */ + COAL_EN = (1U << 16), /* Enable int coalescing */ + + /* MVS_INT_STAT, MVS_INT_MASK */ + CINT_I2C = (1U << 31), /* I2C event */ + CINT_SW0 = (1U << 30), /* software event 0 */ + CINT_SW1 = (1U << 29), /* software event 1 */ + CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */ + CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */ + CINT_MEM = (1U << 26), /* int mem parity err */ + CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */ + CINT_SRS = (1U << 3), /* SRS event */ + CINT_CI_STOP = (1U << 1), /* cmd issue stopped */ + CINT_DONE = (1U << 0), /* cmd completion */ + + /* shl for ports 1-3 */ + CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */ + CINT_PORT = (1U << 8), /* port0 event */ + CINT_PORT_MASK_OFFSET = 8, + CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET), + + /* TX (delivery) ring bits */ + TXQ_CMD_SHIFT = 29, + TXQ_CMD_SSP = 1, /* SSP protocol */ + TXQ_CMD_SMP = 2, /* SMP protocol */ + TXQ_CMD_STP = 3, /* STP/SATA protocol */ + TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ + TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ + TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ + TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */ + TXQ_SRS_SHIFT = 20, /* SATA register set */ + TXQ_SRS_MASK = 0x7f, + TXQ_PHY_SHIFT = 12, /* PHY bitmap */ + TXQ_PHY_MASK = 0xff, + TXQ_SLOT_MASK = 0xfff, /* slot number */ + + /* RX (completion) ring bits */ + RXQ_GOOD = (1U << 23), /* Response good */ + RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */ + RXQ_CMD_RX = (1U << 20), /* target cmd received */ + RXQ_ATTN = (1U << 19), /* attention */ + RXQ_RSP = (1U << 18), /* response frame xfer'd */ + RXQ_ERR = (1U << 17), /* err info rec xfer'd */ + RXQ_DONE = (1U << 16), /* cmd complete */ + RXQ_SLOT_MASK = 0xfff, /* slot number */ + + /* mvs_cmd_hdr bits */ + MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */ + MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */ + + /* SSP initiator only */ + MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */ + + /* SSP initiator or target */ + MCH_SSP_FR_TASK = 0x1, /* TASK frame */ + + /* SSP target only */ + MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */ + MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */ + MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */ + MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */ + + MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */ + MCH_FBURST = (1U << 11), /* first burst (SSP) */ + MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */ + MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */ + MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */ + MCH_RESET = (1U << 7), /* Reset (STP/SATA) */ + MCH_FPDMA = (1U << 
6), /* First party DMA (STP/SATA) */ + MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */ + MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */ + MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/ + + CCTL_RST = (1U << 5), /* port logic reset */ + + /* 0(LSB first), 1(MSB first) */ + CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */ + CCTL_ENDIAN_RSP = (1U << 2), /* response frame */ + CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */ + CCTL_ENDIAN_CMD = (1U << 0), /* command table */ + + /* MVS_Px_SER_CTLSTAT (per-phy control) */ + PHY_SSP_RST = (1U << 3), /* reset SSP link layer */ + PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */ + PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */ + PHY_RST = (1U << 0), /* phy reset */ + PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8), + PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12), + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), + PHY_NEG_SPP_PHYS_LINK_RATE_MASK = + (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), + PHY_READY_MASK = (1U << 20), + + /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */ + PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */ + PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */ + PHYEV_AN = (1U << 18), /* SATA async notification */ + PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */ + PHYEV_SIG_FIS = (1U << 16), /* signature FIS */ + PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */ + PHYEV_IU_BIG = (1U << 11), /* IU too long err */ + PHYEV_IU_SMALL = (1U << 10), /* IU too short err */ + PHYEV_UNK_TAG = (1U << 9), /* unknown tag */ + PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */ + PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */ + PHYEV_PORT_SEL = (1U << 6), /* port selector present */ + PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */ + PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */ + PHYEV_ID_FAIL = (1U << 3), /* identify failed */ + PHYEV_ID_DONE = (1U << 2), /* identify done */ + PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */ + PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */ + + /* MVS_PCS */ + PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */ + PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */ + PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */ + PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */ + PCS_RSP_RX_EN = (1U << 7), /* raw response rx */ + PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */ + PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */ + PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */ + PCS_CMD_RST = (1U << 1), /* reset cmd issue */ + PCS_CMD_EN = (1U << 0), /* enable cmd issue */ + + /* Port n Attached Device Info */ + PORT_DEV_SSP_TRGT = (1U << 19), + PORT_DEV_SMP_TRGT = (1U << 18), + PORT_DEV_STP_TRGT = (1U << 17), + PORT_DEV_SSP_INIT = (1U << 11), + PORT_DEV_SMP_INIT = (1U << 10), + PORT_DEV_STP_INIT = (1U << 9), + PORT_PHY_ID_MASK = (0xFFU << 24), + PORT_DEV_TRGT_MASK = (0x7U << 17), + PORT_DEV_INIT_MASK = (0x7U << 9), + PORT_DEV_TYPE_MASK = (0x7U << 0), + + /* Port n PHY Status */ + PHY_RDY = (1U << 2), + PHY_DW_SYNC = (1U << 1), + PHY_OOB_DTCTD = (1U << 0), + + /* VSR */ + /* PHYMODE 6 (CDB) */ + PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */ + PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */ + PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/ + PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */ + PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */ + PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */ + PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select 
(final) */ + PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */ + PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */ + PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */ + PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */ + PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */ + PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */ + PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */ +}; + +enum mvs_info_flags { + MVF_MSI = (1U << 0), /* MSI is enabled */ + MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ +}; + +enum sas_cmd_port_registers { + CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */ + CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */ + CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */ + CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */ + CMD_OOB_SPACE = 0x110, /* OOB space control register */ + CMD_OOB_BURST = 0x114, /* OOB burst control register */ + CMD_PHY_TIMER = 0x118, /* PHY timer control register */ + CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */ + CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */ + CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */ + CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */ + CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */ + CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */ + CMD_ID_TEST = 0x134, /* ID test register */ + CMD_PL_TIMER = 0x138, /* PL timer register */ + CMD_WD_TIMER = 0x13c, /* WD timer register */ + CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */ + CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */ + CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */ + CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */ + CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */ + CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */ + CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */ + CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */ + CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */ + CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memory BIST Status */ + CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */ + CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */ + CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */ + CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */ + CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */ + CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */ + CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */ + CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */ + CMD_RESET_COUNT = 0x188, /* Reset Count */ + CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */ + CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */ + CMD_PHY_CTL = 0x194, /* PHY Control and Status */ + CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */ + CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */ + CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */ + CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */ + CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */ + CMD_HOST_CTL = 0x1AC, /* Host Control Status */ + CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */ + CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */ + CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */ + CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */ + CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */ + CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */ +}; + +/* SAS/SATA configuration port registers, aka phy registers */ +enum
sas_sata_config_port_regs { + PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */ + PHYR_ADDR_LO = 0x04, /* my SAS address (low) */ + PHYR_ADDR_HI = 0x08, /* my SAS address (high) */ + PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */ + PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */ + PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */ + PHYR_SATA_CTL = 0x18, /* SATA control */ + PHYR_PHY_STAT = 0x1C, /* PHY status */ + PHYR_SATA_SIG0 = 0x20, /* port SATA signature FIS (bytes 0-3) */ + PHYR_SATA_SIG1 = 0x24, /* port SATA signature FIS (bytes 4-7) */ + PHYR_SATA_SIG2 = 0x28, /* port SATA signature FIS (bytes 8-11) */ + PHYR_SATA_SIG3 = 0x2c, /* port SATA signature FIS (bytes 12-15) */ + PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */ + PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */ + PHYR_WIDE_PORT = 0x38, /* wide port participating */ + PHYR_CURRENT0 = 0x80, /* current connection info 0 */ + PHYR_CURRENT1 = 0x84, /* current connection info 1 */ + PHYR_CURRENT2 = 0x88, /* current connection info 2 */ +}; + +/* SAS/SATA Vendor Specific Port Registers */ +enum sas_sata_vsp_regs { + VSR_PHY_STAT = 0x00, /* Phy Status */ + VSR_PHY_MODE1 = 0x01, /* phy tx */ + VSR_PHY_MODE2 = 0x02, /* tx scc */ + VSR_PHY_MODE3 = 0x03, /* pll */ + VSR_PHY_MODE4 = 0x04, /* VCO */ + VSR_PHY_MODE5 = 0x05, /* Rx */ + VSR_PHY_MODE6 = 0x06, /* CDR */ + VSR_PHY_MODE7 = 0x07, /* Impedance */ + VSR_PHY_MODE8 = 0x08, /* Voltage */ + VSR_PHY_MODE9 = 0x09, /* Test */ + VSR_PHY_MODE10 = 0x0A, /* Power */ + VSR_PHY_MODE11 = 0x0B, /* Phy Mode */ + VSR_PHY_VS0 = 0x0C, /* Vendor Specific 0 */ + VSR_PHY_VS1 = 0x0D, /* Vendor Specific 1 */ +}; + +enum pci_cfg_registers { + PCR_PHY_CTL = 0x40, + PCR_PHY_CTL2 = 0x90, + PCR_DEV_CTRL = 0xE8, +}; + +enum pci_cfg_register_bits { + PCTL_PWR_ON = (0xFU << 24), + PCTL_OFF = (0xFU << 12), + PRD_REQ_SIZE = (0x4000), + PRD_REQ_MASK = (0x00007000), +}; + +enum nvram_layout_offsets { + NVR_SIG = 0x00, /* 0xAA, 0x55 */ + NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */ +}; + +enum chip_flavors { + chip_6320, + chip_6440, + chip_6480, +}; + +enum port_type { + PORT_TYPE_SAS = (1L << 1), + PORT_TYPE_SATA = (1L << 0), +}; + +/* Command Table Format */ +enum ct_format { + /* SSP */ + SSP_F_H = 0x00, + SSP_F_IU = 0x18, + SSP_F_MAX = 0x4D, + /* STP */ + STP_CMD_FIS = 0x00, + STP_ATAPI_CMD = 0x40, + STP_F_MAX = 0x10, + /* SMP */ + SMP_F_T = 0x00, + SMP_F_DEP = 0x01, + SMP_F_MAX = 0x101, +}; + +enum status_buffer { + SB_EIR_OFF = 0x00, /* Error Information Record */ + SB_RFB_OFF = 0x08, /* Response Frame Buffer */ + SB_RFB_MAX = 0x400, /* RFB size */ +}; + +enum error_info_rec { + CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */ + CMD_PI_ERR = (1U << 30), /* Protection info error.
see flags2 */ + RSP_OVER = (1U << 29), /* rsp buffer overflow */ + RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */ + UNK_FIS = (1U << 27), /* unknown FIS */ + DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */ + SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */ + TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */ + R_ERR = (1U << 23), /* SATA returned R_ERR prim */ + RD_OFS = (1U << 20), /* Read DATA frame invalid offset */ + XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */ + UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */ + DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */ + INTERLOCK = (1U << 15), /* interlock error */ + NAK = (1U << 14), /* NAK rx'd */ + ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */ + CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */ + OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */ + PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */ + NO_DEST = (1U << 9), /* I_T nexus lost, no destination */ + STP_RES_BSY = (1U << 8), /* STP resources busy */ + BREAK = (1U << 7), /* break received */ + BAD_DEST = (1U << 6), /* bad destination */ + BAD_PROTO = (1U << 5), /* protocol not supported */ + BAD_RATE = (1U << 4), /* cxn rate not supported */ + WRONG_DEST = (1U << 3), /* wrong destination error */ + CREDIT_TO = (1U << 2), /* credit timeout */ + WDOG_TO = (1U << 1), /* watchdog timeout */ + BUF_PAR = (1U << 0), /* buffer parity error */ +}; + +enum error_info_rec_2 { + SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */ + GRD_CHK_ERR = (1U << 14), /* Guard Check Error */ + APP_CHK_ERR = (1U << 13), /* Application Check error */ + REF_CHK_ERR = (1U << 12), /* Reference Check Error */ + USR_BLK_NM = (1U << 0), /* User Block Number */ +}; + +struct mvs_chip_info { + u32 n_phy; + u32 srs_sz; + u32 slot_width; +}; + +struct mvs_err_info { + __le32 flags; + __le32 flags2; +}; + +struct mvs_prd { + __le64 addr; /* 64-bit buffer address */ + __le32 reserved; + __le32 len; /* 16-bit length */ +}; + +struct mvs_cmd_hdr { + __le32 flags; /* PRD tbl len; SAS, SATA ctl */ + __le32 lens; /* cmd, max resp frame len */ + __le32 tags; /* targ port xfer tag; tag */ + __le32 data_len; /* data xfer len */ + __le64 cmd_tbl; /* command table address */ + __le64 open_frame; /* open addr frame address */ + __le64 status_buf; /* status buffer address */ + __le64 prd_tbl; /* PRD tbl address */ + __le32 reserved[4]; +}; + +struct mvs_port { + struct asd_sas_port sas_port; + u8 port_attached; + u8 taskfileset; + u8 wide_port_phymap; + struct list_head list; +}; + +struct mvs_phy { + struct mvs_port *port; + struct asd_sas_phy sas_phy; + struct sas_identify identify; + struct scsi_device *sdev; + u64 dev_sas_addr; + u64 att_dev_sas_addr; + u32 att_dev_info; + u32 dev_info; + u32 phy_type; + u32 phy_status; + u32 irq_status; + u32 frame_rcvd_size; + u8 frame_rcvd[32]; + u8 phy_attached; + enum sas_linkrate minimum_linkrate; + enum sas_linkrate maximum_linkrate; +}; + +struct mvs_slot_info { + struct list_head list; + struct sas_task *task; + u32 n_elem; + u32 tx; + + /* DMA buffer for storing cmd tbl, open addr frame, status buffer, + * and PRD table + */ + void *buf; + dma_addr_t buf_dma; +#if _MV_DUMP + u32 cmd_size; +#endif + + void *response; + struct mvs_port *port; +}; + +struct mvs_info { + unsigned long flags; + + spinlock_t lock; /* host-wide lock */ + struct pci_dev *pdev; /* our device */ + void __iomem *regs; /* enhanced mode registers */ + void __iomem *peri_regs; /* peripheral registers */ + + u8 
sas_addr[SAS_ADDR_SIZE]; + struct sas_ha_struct sas; /* SCSI/SAS glue */ + struct Scsi_Host *shost; + + __le32 *tx; /* TX (delivery) DMA ring */ + dma_addr_t tx_dma; + u32 tx_prod; /* cached next-producer idx */ + + __le32 *rx; /* RX (completion) DMA ring */ + dma_addr_t rx_dma; + u32 rx_cons; /* RX consumer idx */ + + __le32 *rx_fis; /* RX'd FIS area */ + dma_addr_t rx_fis_dma; + + struct mvs_cmd_hdr *slot; /* DMA command header slots */ + dma_addr_t slot_dma; + + const struct mvs_chip_info *chip; + + u8 tags[MVS_SLOTS]; + struct mvs_slot_info slot_info[MVS_SLOTS]; + /* further per-slot information */ + struct mvs_phy phy[MVS_MAX_PHYS]; + struct mvs_port port[MVS_MAX_PHYS]; +#ifdef MVS_USE_TASKLET + struct tasklet_struct tasklet; +#endif +}; + +static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata); +static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port); +static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val); +static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port); +static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val); +static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val); +static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port); + +static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i); +static void mvs_detect_porttype(struct mvs_info *mvi, int i); +static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); +static void mvs_release_task(struct mvs_info *mvi, int phy_no); + +static int mvs_scan_finished(struct Scsi_Host *, unsigned long); +static void mvs_scan_start(struct Scsi_Host *); +static int mvs_slave_configure(struct scsi_device *sdev); + +static struct scsi_transport_template *mvs_stt; + +static const struct mvs_chip_info mvs_chips[] = { + [chip_6320] = { 2, 16, 9 }, + [chip_6440] = { 4, 16, 9 }, + [chip_6480] = { 8, 32, 10 }, +}; + +static struct scsi_host_template mvs_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .queuecommand = sas_queuecommand, + .target_alloc = sas_target_alloc, + .slave_configure = mvs_slave_configure, + .slave_destroy = sas_slave_destroy, + .scan_finished = mvs_scan_finished, + .scan_start = mvs_scan_start, + .change_queue_depth = sas_change_queue_depth, + .change_queue_type = sas_change_queue_type, + .bios_param = sas_bios_param, + .can_queue = 1, + .cmd_per_lun = 1, + .this_id = -1, + .sg_tablesize = SG_ALL, + .max_sectors = SCSI_DEFAULT_MAX_SECTORS, + .use_clustering = ENABLE_CLUSTERING, + .eh_device_reset_handler = sas_eh_device_reset_handler, + .eh_bus_reset_handler = sas_eh_bus_reset_handler, + .slave_alloc = sas_slave_alloc, + .target_destroy = sas_target_destroy, + .ioctl = sas_ioctl, +}; + +static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) +{ + u32 i; + u32 run; + u32 offset; + + offset = 0; + while (size) { + printk("%08X : ", baseaddr + offset); + if (size >= 16) + run = 16; + else + run = size; + size -= run; + for (i = 0; i < 16; i++) { + if (i < run) + printk("%02X ", (u32)data[i]); + else + printk(" "); + } + printk(": "); + for (i = 0; i < run; i++) + printk("%c", isalnum(data[i]) ? 
data[i] : '.'); + printk("\n"); + data = &data[16]; + offset += run; + } + printk("\n"); +} + +#if _MV_DUMP +static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, + enum sas_protocol proto) +{ + u32 offset; + struct pci_dev *pdev = mvi->pdev; + struct mvs_slot_info *slot = &mvi->slot_info[tag]; + + offset = slot->cmd_size + MVS_OAF_SZ + + sizeof(struct mvs_prd) * slot->n_elem; + dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n", + tag); + mvs_hexdump(32, (u8 *) slot->response, + (u32) slot->buf_dma + offset); +} +#endif + +static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag, + enum sas_protocol proto) +{ +#if _MV_DUMP + u32 sz, w_ptr; + u64 addr; + void __iomem *regs = mvi->regs; + struct pci_dev *pdev = mvi->pdev; + struct mvs_slot_info *slot = &mvi->slot_info[tag]; + + /* Delivery Queue */ + sz = mr32(TX_CFG) & TX_RING_SZ_MASK; + w_ptr = slot->tx; + addr = ((u64)mr32(TX_HI) << 32) | mr32(TX_LO); + dev_printk(KERN_DEBUG, &pdev->dev, + "Delivery Queue Size=%04d, WRT_PTR=%04X\n", sz, w_ptr); + dev_printk(KERN_DEBUG, &pdev->dev, + "Delivery Queue Base Address=0x%llX (PA)" + "(tx_dma=0x%llX), Entry=%04d\n", + addr, mvi->tx_dma, w_ptr); + mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]), + (u32) mvi->tx_dma + sizeof(u32) * w_ptr); + /* Command List */ + addr = mvi->slot_dma; + dev_printk(KERN_DEBUG, &pdev->dev, + "Command List Base Address=0x%llX (PA)" + "(slot_dma=0x%llX), Header=%03d\n", + addr, slot->buf_dma, tag); + dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag); + /* mvs_cmd_hdr */ + mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]), + (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr)); + /* 1. command table area */ + dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n"); + mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma); + /* 2. open address frame area */ + dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n"); + mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size, + (u32) slot->buf_dma + slot->cmd_size); + /* 3. status buffer */ + mvs_hba_sb_dump(mvi, tag, proto); + /* 4. PRD table */ + dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n"); + mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem, + (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ, + (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ); +#endif +} + +static void mvs_hba_cq_dump(struct mvs_info *mvi) +{ +#if (_MV_DUMP > 2) + u64 addr; + void __iomem *regs = mvi->regs; + struct pci_dev *pdev = mvi->pdev; + u32 entry = mvi->rx_cons + 1; + u32 rx_desc = le32_to_cpu(mvi->rx[entry]); + + /* Completion Queue */ + addr = ((u64)mr32(RX_HI) << 32) | mr32(RX_LO); + dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n", + mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task); + dev_printk(KERN_DEBUG, &pdev->dev, + "Completion List Base Address=0x%llX (PA), " + "CQ_Entry=%04d, CQ_WP=0x%08X\n", + addr, entry - 1, mvi->rx[0]); + mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc), + mvi->rx_dma + sizeof(u32) * entry); +#endif +} + +static void mvs_hba_interrupt_enable(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(GBL_CTL); + + mw32(GBL_CTL, tmp | INT_EN); +} + +static void mvs_hba_interrupt_disable(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(GBL_CTL); + + mw32(GBL_CTL, tmp & ~INT_EN); +} + +static int mvs_int_rx(struct mvs_info *mvi, bool self_clear); + +/* move to PCI layer or libata core?
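+ * Note: pci_go_64() below tries a 64-bit streaming mask (with a 64-bit, + * then a 32-bit consistent mask) before falling back to fully 32-bit + * DMA, so the driver still loads on hosts without 64-bit DMA support.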
*/ +static int pci_go_64(struct pci_dev *pdev) +{ + int rc; + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (rc) { + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_printk(KERN_ERR, &pdev->dev, + "64-bit DMA enable failed\n"); + return rc; + } + } + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_printk(KERN_ERR, &pdev->dev, + "32-bit DMA enable failed\n"); + return rc; + } + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_printk(KERN_ERR, &pdev->dev, + "32-bit consistent DMA enable failed\n"); + return rc; + } + } + + return rc; +} + +static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) +{ + if (task->lldd_task) { + struct mvs_slot_info *slot; + slot = (struct mvs_slot_info *) task->lldd_task; + *tag = slot - mvi->slot_info; + return 1; + } + return 0; +} + +static void mvs_tag_clear(struct mvs_info *mvi, u32 tag) +{ + void *bitmap = (void *) &mvi->tags; + clear_bit(tag, bitmap); +} + +static void mvs_tag_free(struct mvs_info *mvi, u32 tag) +{ + mvs_tag_clear(mvi, tag); +} + +static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) +{ + void *bitmap = (void *) &mvi->tags; + set_bit(tag, bitmap); +} + +static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) +{ + unsigned int index, tag; + void *bitmap = (void *) &mvi->tags; + + index = find_first_zero_bit(bitmap, MVS_SLOTS); + tag = index; + if (tag >= MVS_SLOTS) + return -SAS_QUEUE_FULL; + mvs_tag_set(mvi, tag); + *tag_out = tag; + return 0; +} + +static void mvs_tag_init(struct mvs_info *mvi) +{ + int i; + for (i = 0; i < MVS_SLOTS; ++i) + mvs_tag_clear(mvi, i); +} + +#ifndef MVS_DISABLE_NVRAM +static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data) +{ + int timeout = 1000; + + if (addr & ~SPI_ADDR_MASK) + return -EINVAL; + + writel(addr, regs + SPI_CMD); + writel(TWSI_RD, regs + SPI_CTL); + + while (timeout-- > 0) { + if (readl(regs + SPI_CTL) & TWSI_RDY) { + *data = readl(regs + SPI_DATA); + return 0; + } + + udelay(10); + } + + return -EBUSY; +} + +static int mvs_eep_read_buf(void __iomem *regs, u32 addr, + void *buf, u32 buflen) +{ + u32 addr_end, tmp_addr, i, j; + u32 tmp = 0; + int rc; + u8 *tmp8, *buf8 = buf; + + addr_end = addr + buflen; + tmp_addr = ALIGN(addr, 4); + if (addr > 0xff) + return -EINVAL; + + j = addr & 0x3; + if (j) { + rc = mvs_eep_read(regs, tmp_addr, &tmp); + if (rc) + return rc; + + tmp8 = (u8 *)&tmp; + for (i = j; i < 4; i++) + *buf8++ = tmp8[i]; + + tmp_addr += 4; + } + + for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) { + rc = mvs_eep_read(regs, tmp_addr, &tmp); + if (rc) + return rc; + + memcpy(buf8, &tmp, 4); + buf8 += 4; + } + + if (tmp_addr < addr_end) { + rc = mvs_eep_read(regs, tmp_addr, &tmp); + if (rc) + return rc; + + tmp8 = (u8 *)&tmp; + j = addr_end - tmp_addr; + for (i = 0; i < j; i++) + *buf8++ = tmp8[i]; + + tmp_addr += 4; + } + + return 0; +} +#endif + +static int mvs_nvram_read(struct mvs_info *mvi, u32 addr, + void *buf, u32 buflen) +{ +#ifndef MVS_DISABLE_NVRAM + void __iomem *regs = mvi->regs; + int rc, i; + u32 sum; + u8 hdr[2], *tmp; + const char *msg; + + rc = mvs_eep_read_buf(regs, addr, &hdr, 2); + if (rc) { + msg = "nvram hdr read failed"; + goto err_out; + } + rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen); + if (rc) { + msg = "nvram read failed"; + goto err_out; + } + + if (hdr[0] != 0x5A) { + /* entry id */ + msg = "invalid nvram entry id"; + rc = -ENOENT; + goto 
err_out; + } + + tmp = buf; + sum = ((u32)hdr[0]) + ((u32)hdr[1]); + for (i = 0; i < buflen; i++) + sum += ((u32)tmp[i]); + + if (sum) { + msg = "nvram checksum failure"; + rc = -EILSEQ; + goto err_out; + } + + return 0; + +err_out: + dev_printk(KERN_ERR, &mvi->pdev->dev, "%s\n", msg); + return rc; +#else + /* FIXME: for SAS target mode */ + memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8); + return 0; +#endif +} + +static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) +{ + struct mvs_phy *phy = &mvi->phy[i]; + struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; + + if (!phy->phy_attached) + return; + + if (sas_phy->phy) { + struct sas_phy *sphy = sas_phy->phy; + + sphy->negotiated_linkrate = sas_phy->linkrate; + sphy->minimum_linkrate = phy->minimum_linkrate; + sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; + sphy->maximum_linkrate = phy->maximum_linkrate; + sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS; + } + + if (phy->phy_type & PORT_TYPE_SAS) { + struct sas_identify_frame *id; + + id = (struct sas_identify_frame *)phy->frame_rcvd; + id->dev_type = phy->identify.device_type; + id->initiator_bits = SAS_PROTOCOL_ALL; + id->target_bits = phy->identify.target_port_protocols; + } else if (phy->phy_type & PORT_TYPE_SATA) { + /* TODO */ + } + mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size; + mvi->sas.notify_port_event(mvi->sas.sas_phy[i], + PORTE_BYTES_DMAED); +} + +static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + /* give the phy enabling interrupt event time to come in (1s + * is empirically about all it takes) */ + if (time < HZ) + return 0; + /* Wait for discovery to finish */ + scsi_flush_work(shost); + return 1; +} + +static void mvs_scan_start(struct Scsi_Host *shost) +{ + int i; + struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha; + + for (i = 0; i < mvi->chip->n_phy; ++i) { + mvs_bytes_dmaed(mvi, i); + } +} + +static int mvs_slave_configure(struct scsi_device *sdev) +{ + struct domain_device *dev = sdev_to_domain_dev(sdev); + int ret = sas_slave_configure(sdev); + + if (ret) + return ret; + + if (dev_is_sata(dev)) { + /* struct ata_port *ap = dev->sata_dev.ap; */ + /* struct ata_device *adev = ap->link.device; */ + + /* clamp at no NCQ for the time being */ + /* adev->flags |= ATA_DFLAG_NCQ_OFF; */ + scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); + } + return 0; +} + +static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) +{ + struct pci_dev *pdev = mvi->pdev; + struct sas_ha_struct *sas_ha = &mvi->sas; + struct mvs_phy *phy = &mvi->phy[phy_no]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no); + /* + * 'events' holds the port-level event bits; check the per-port + * interrupt status to find out what actually happened.
+ */ + dev_printk(KERN_DEBUG, &pdev->dev, + "Port %d Event = %X\n", + phy_no, phy->irq_status); + + if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) { + mvs_release_task(mvi, phy_no); + if (!mvs_is_phy_ready(mvi, phy_no)) { + sas_phy_disconnected(sas_phy); + sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL); + dev_printk(KERN_INFO, &pdev->dev, + "Port %d Unplug Notice\n", phy_no); + + } else + mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL); + } + if (!(phy->irq_status & PHYEV_DEC_ERR)) { + if (phy->irq_status & PHYEV_COMWAKE) { + u32 tmp = mvs_read_port_irq_mask(mvi, phy_no); + mvs_write_port_irq_mask(mvi, phy_no, + tmp | PHYEV_SIG_FIS); + } + if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { + phy->phy_status = mvs_is_phy_ready(mvi, phy_no); + if (phy->phy_status) { + mvs_detect_porttype(mvi, phy_no); + + if (phy->phy_type & PORT_TYPE_SATA) { + u32 tmp = mvs_read_port_irq_mask(mvi, + phy_no); + tmp &= ~PHYEV_SIG_FIS; + mvs_write_port_irq_mask(mvi, + phy_no, tmp); + } + + mvs_update_phyinfo(mvi, phy_no, 0); + sas_ha->notify_phy_event(sas_phy, + PHYE_OOB_DONE); + mvs_bytes_dmaed(mvi, phy_no); + } else { + dev_printk(KERN_DEBUG, &pdev->dev, + "plugin interrupt but phy is gone\n"); + mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, + NULL); + } + } else if (phy->irq_status & PHYEV_BROAD_CH) { + mvs_release_task(mvi, phy_no); + sas_ha->notify_port_event(sas_phy, + PORTE_BROADCAST_RCVD); + } + } + mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status); +} + +static void mvs_int_sata(struct mvs_info *mvi) +{ + u32 tmp; + void __iomem *regs = mvi->regs; + tmp = mr32(INT_STAT_SRS); + mw32(INT_STAT_SRS, tmp & 0xFFFF); +} + +static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task, + u32 slot_idx) +{ + void __iomem *regs = mvi->regs; + struct domain_device *dev = task->dev; + struct asd_sas_port *sas_port = dev->port; + struct mvs_port *port = mvi->slot_info[slot_idx].port; + u32 reg_set, phy_mask; + + if (!sas_protocol_ata(task->task_proto)) { + reg_set = 0; + phy_mask = (port->wide_port_phymap) ? 
port->wide_port_phymap : + sas_port->phy_mask; + } else { + reg_set = port->taskfileset; + phy_mask = sas_port->phy_mask; + } + mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx | + (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) | + (phy_mask << TXQ_PHY_SHIFT) | + (reg_set << TXQ_SRS_SHIFT)); + + mw32(TX_PROD_IDX, mvi->tx_prod); + mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); +} + +static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, + u32 slot_idx, int err) +{ + struct mvs_port *port = mvi->slot_info[slot_idx].port; + struct task_status_struct *tstat = &task->task_status; + struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; + int stat = SAM_GOOD; + + resp->frame_len = sizeof(struct dev_to_host_fis); + memcpy(&resp->ending_fis[0], + SATA_RECEIVED_D2H_FIS(port->taskfileset), + sizeof(struct dev_to_host_fis)); + tstat->buf_valid_size = sizeof(*resp); + if (unlikely(err)) + stat = SAS_PROTO_RESPONSE; + return stat; +} + +static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) +{ + u32 slot_idx = rx_desc & RXQ_SLOT_MASK; + mvs_tag_clear(mvi, slot_idx); +} + +static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, + struct mvs_slot_info *slot, u32 slot_idx) +{ + if (!sas_protocol_ata(task->task_proto)) + if (slot->n_elem) + pci_unmap_sg(mvi->pdev, task->scatter, + slot->n_elem, task->data_dir); + + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); + pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1, + PCI_DMA_TODEVICE); + break; + + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SSP: + default: + /* do nothing */ + break; + } + list_del(&slot->list); + task->lldd_task = NULL; + slot->task = NULL; + slot->port = NULL; +} + +static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, + u32 slot_idx) +{ + struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; + u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); + u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4)); + int stat = SAM_CHECK_COND; + + if (err_dw1 & SLOT_BSY_ERR) { + stat = SAS_QUEUE_FULL; + mvs_slot_reset(mvi, task, slot_idx); + } + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + break; + case SAS_PROTOCOL_SMP: + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + if (err_dw0 & TFILE_ERR) + stat = mvs_sata_done(mvi, task, slot_idx, 1); + break; + default: + break; + } + + mvs_hexdump(16, (u8 *) slot->response, 0); + return stat; +} + +static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) +{ + u32 slot_idx = rx_desc & RXQ_SLOT_MASK; + struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; + struct sas_task *task = slot->task; + struct task_status_struct *tstat; + struct mvs_port *port; + bool aborted; + void *to; + + if (unlikely(!task || !task->lldd_task)) + return -1; + + mvs_hba_cq_dump(mvi); + + spin_lock(&task->task_state_lock); + aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; + if (!aborted) { + task->task_state_flags &= + ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); + task->task_state_flags |= SAS_TASK_STATE_DONE; + } + spin_unlock(&task->task_state_lock); + + if (aborted) { + mvs_slot_task_free(mvi, task, slot, slot_idx); + mvs_slot_free(mvi, rx_desc); + return -1; + } + + port = slot->port; + tstat = &task->task_status; + memset(tstat, 0, sizeof(*tstat)); + tstat->resp = SAS_TASK_COMPLETE; + + if (unlikely(!port->port_attached || flags)) { + 
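/* the port vanished or this completion was forced (flags != 0, as mvs_release_task does); fabricate an error record for the slot */ +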
mvs_slot_err(mvi, task, slot_idx); + if (!sas_protocol_ata(task->task_proto)) + tstat->stat = SAS_PHY_DOWN; + goto out; + } + + /* error info record present */ + if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { + tstat->stat = mvs_slot_err(mvi, task, slot_idx); + goto out; + } + + switch (task->task_proto) { + case SAS_PROTOCOL_SSP: + /* hw says status == 0, datapres == 0 */ + if (rx_desc & RXQ_GOOD) { + tstat->stat = SAM_GOOD; + tstat->resp = SAS_TASK_COMPLETE; + } + /* response frame present */ + else if (rx_desc & RXQ_RSP) { + struct ssp_response_iu *iu = + slot->response + sizeof(struct mvs_err_info); + sas_ssp_task_response(&mvi->pdev->dev, task, iu); + } + + /* should never happen? */ + else + tstat->stat = SAM_CHECK_COND; + break; + + case SAS_PROTOCOL_SMP: { + struct scatterlist *sg_resp = &task->smp_task.smp_resp; + tstat->stat = SAM_GOOD; + to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); + memcpy(to + sg_resp->offset, + slot->response + sizeof(struct mvs_err_info), + sg_dma_len(sg_resp)); + kunmap_atomic(to, KM_IRQ0); + break; + } + + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { + tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); + break; + } + + default: + tstat->stat = SAM_CHECK_COND; + break; + } + +out: + mvs_slot_task_free(mvi, task, slot, slot_idx); + if (unlikely(tstat->stat != SAS_QUEUE_FULL)) + mvs_slot_free(mvi, rx_desc); + + spin_unlock(&mvi->lock); + task->task_done(task); + spin_lock(&mvi->lock); + return tstat->stat; +} + +static void mvs_release_task(struct mvs_info *mvi, int phy_no) +{ + struct list_head *pos, *n; + struct mvs_slot_info *slot; + struct mvs_phy *phy = &mvi->phy[phy_no]; + struct mvs_port *port = phy->port; + u32 rx_desc; + + if (!port) + return; + + list_for_each_safe(pos, n, &port->list) { + slot = container_of(pos, struct mvs_slot_info, list); + rx_desc = (u32) (slot - mvi->slot_info); + mvs_slot_complete(mvi, rx_desc, 1); + } +} + +static void mvs_int_full(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp, stat; + int i; + + stat = mr32(INT_STAT); + + mvs_int_rx(mvi, false); + + for (i = 0; i < MVS_MAX_PORTS; i++) { + tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); + if (tmp) + mvs_int_port(mvi, i, tmp); + } + + if (stat & CINT_SRS) + mvs_int_sata(mvi); + + mw32(INT_STAT, stat); +} + +static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) +{ + void __iomem *regs = mvi->regs; + u32 rx_prod_idx, rx_desc; + bool attn = false; + struct pci_dev *pdev = mvi->pdev; + + /* the first dword in the RX ring is special: it contains + * a mirror of the hardware's RX producer index, so that + * we don't have to stall the CPU reading that register. + * The actual RX ring is offset by one dword, due to this. 
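+ * Illustration: if our cached consumer index was 4 and the mirror at + * rx[0] now reads 7, the loop below walks ring entries 5 through 7, + * reading them at rx[6]..rx[8] because of that one-dword offset.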
+ */ + rx_prod_idx = mvi->rx_cons; + mvi->rx_cons = le32_to_cpu(mvi->rx[0]); + if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ + return 0; + + /* The completion queue may lag: re-read the producer index from the + * register and try again. Note: with interrupt coalescing enabled, + * the register must be re-read on every pass. + */ + if (mvi->rx_cons == rx_prod_idx) + mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK; + + if (mvi->rx_cons == rx_prod_idx) + return 0; + + while (mvi->rx_cons != rx_prod_idx) { + + /* increment our internal RX consumer pointer */ + rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); + + rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); + + if (likely(rx_desc & RXQ_DONE)) + mvs_slot_complete(mvi, rx_desc, 0); + if (rx_desc & RXQ_ATTN) { + attn = true; + dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n", + rx_desc); + } else if (rx_desc & RXQ_ERR) { + if (!(rx_desc & RXQ_DONE)) + mvs_slot_complete(mvi, rx_desc, 0); + dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n", + rx_desc); + } else if (rx_desc & RXQ_SLOT_RESET) { + dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n", + rx_desc); + mvs_slot_free(mvi, rx_desc); + } + } + + if (attn && self_clear) + mvs_int_full(mvi); + + return 0; +} + +#ifdef MVS_USE_TASKLET +static void mvs_tasklet(unsigned long data) +{ + struct mvs_info *mvi = (struct mvs_info *) data; + unsigned long flags; + + spin_lock_irqsave(&mvi->lock, flags); + +#ifdef MVS_DISABLE_MSI + mvs_int_full(mvi); +#else + mvs_int_rx(mvi, true); +#endif + spin_unlock_irqrestore(&mvi->lock, flags); +} +#endif + +static irqreturn_t mvs_interrupt(int irq, void *opaque) +{ + struct mvs_info *mvi = opaque; + void __iomem *regs = mvi->regs; + u32 stat; + + stat = mr32(GBL_INT_STAT); + + if (stat == 0 || stat == 0xffffffff) + return IRQ_NONE; + + /* clear CMD_CMPLT ASAP */ + mw32_f(INT_STAT, CINT_DONE); + +#ifndef MVS_USE_TASKLET + spin_lock(&mvi->lock); + + mvs_int_full(mvi); + + spin_unlock(&mvi->lock); +#else + tasklet_schedule(&mvi->tasklet); +#endif + return IRQ_HANDLED; +} + +#ifndef MVS_DISABLE_MSI +static irqreturn_t mvs_msi_interrupt(int irq, void *opaque) +{ + struct mvs_info *mvi = opaque; + +#ifndef MVS_USE_TASKLET + spin_lock(&mvi->lock); + + mvs_int_rx(mvi, true); + + spin_unlock(&mvi->lock); +#else + tasklet_schedule(&mvi->tasklet); +#endif + return IRQ_HANDLED; +} +#endif + +struct mvs_task_exec_info { + struct sas_task *task; + struct mvs_cmd_hdr *hdr; + struct mvs_port *port; + u32 tag; + int n_elem; +}; + +static int mvs_task_prep_smp(struct mvs_info *mvi, + struct mvs_task_exec_info *tei) +{ + int elem, rc, i; + struct sas_task *task = tei->task; + struct mvs_cmd_hdr *hdr = tei->hdr; + struct scatterlist *sg_req, *sg_resp; + u32 req_len, resp_len, tag = tei->tag; + void *buf_tmp; + u8 *buf_oaf; + dma_addr_t buf_tmp_dma; + struct mvs_prd *buf_prd; + struct scatterlist *sg; + struct mvs_slot_info *slot = &mvi->slot_info[tag]; + struct asd_sas_port *sas_port = task->dev->port; + u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); +#if _MV_DUMP + u8 *buf_cmd; + void *from; +#endif + /* + * DMA-map SMP request, response buffers + */ + sg_req = &task->smp_task.smp_req; + elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); + + sg_resp = &task->smp_task.smp_resp; + elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out; + } + resp_len = sg_dma_len(sg_resp); + + /* must be in dwords */ + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL;
+ goto err_out_2; + } + + /* + * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs + */ + + /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ + buf_tmp = slot->buf; + buf_tmp_dma = slot->buf_dma; + +#if _MV_DUMP + buf_cmd = buf_tmp; + hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); + buf_tmp += req_len; + buf_tmp_dma += req_len; + slot->cmd_size = req_len; +#else + hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); +#endif + + /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ + buf_oaf = buf_tmp; + hdr->open_frame = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_OAF_SZ; + buf_tmp_dma += MVS_OAF_SZ; + + /* region 3: PRD table ********************************************* */ + buf_prd = buf_tmp; + if (tei->n_elem) + hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); + else + hdr->prd_tbl = 0; + + i = sizeof(struct mvs_prd) * tei->n_elem; + buf_tmp += i; + buf_tmp_dma += i; + + /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ + slot->response = buf_tmp; + hdr->status_buf = cpu_to_le64(buf_tmp_dma); + + /* + * Fill in TX ring and command slot header + */ + slot->tx = mvi->tx_prod; + mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) | + TXQ_MODE_I | tag | + (sas_port->phy_mask << TXQ_PHY_SHIFT)); + + hdr->flags |= flags; + hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4)); + hdr->tags = cpu_to_le32(tag); + hdr->data_len = 0; + + /* generate open address frame hdr (first 12 bytes) */ + buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */ + buf_oaf[1] = task->dev->linkrate & 0xf; + *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ + memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); + + /* fill in PRD (scatter/gather) table, if any */ + for_each_sg(task->scatter, sg, tei->n_elem, i) { + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); + buf_prd->len = cpu_to_le32(sg_dma_len(sg)); + buf_prd++; + } + +#if _MV_DUMP + /* copy cmd table */ + from = kmap_atomic(sg_page(sg_req), KM_IRQ0); + memcpy(buf_cmd, from + sg_req->offset, req_len); + kunmap_atomic(from, KM_IRQ0); +#endif + return 0; + +err_out_2: + pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); +err_out: + pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1, + PCI_DMA_TODEVICE); + return rc; +} + +static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port) +{ + void __iomem *regs = mvi->regs; + u32 tmp, offs; + u8 *tfs = &port->taskfileset; + + if (*tfs == MVS_ID_NOT_MAPPED) + return; + + offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); + if (*tfs < 16) { + tmp = mr32(PCS); + mw32(PCS, tmp & ~offs); + } else { + tmp = mr32(CTL); + mw32(CTL, tmp & ~offs); + } + + tmp = mr32(INT_STAT_SRS) & (1U << *tfs); + if (tmp) + mw32(INT_STAT_SRS, tmp); + + *tfs = MVS_ID_NOT_MAPPED; +} + +static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port) +{ + int i; + u32 tmp, offs; + void __iomem *regs = mvi->regs; + + if (port->taskfileset != MVS_ID_NOT_MAPPED) + return 0; + + tmp = mr32(PCS); + + for (i = 0; i < mvi->chip->srs_sz; i++) { + if (i == 16) + tmp = mr32(CTL); + offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); + if (!(tmp & offs)) { + port->taskfileset = i; + + if (i < 16) + mw32(PCS, tmp | offs); + else + mw32(CTL, tmp | offs); + tmp = mr32(INT_STAT_SRS) & (1U << i); + if (tmp) + mw32(INT_STAT_SRS, tmp); + return 0; + } + } + return MVS_ID_NOT_MAPPED; +} + +static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) +{ + struct ata_queued_cmd *qc = 
task->uldd_task; + + if (qc) { + if (qc->tf.command == ATA_CMD_FPDMA_WRITE || + qc->tf.command == ATA_CMD_FPDMA_READ) { + *tag = qc->tag; + return 1; + } + } + + return 0; +} + +static int mvs_task_prep_ata(struct mvs_info *mvi, + struct mvs_task_exec_info *tei) +{ + struct sas_task *task = tei->task; + struct domain_device *dev = task->dev; + struct mvs_cmd_hdr *hdr = tei->hdr; + struct asd_sas_port *sas_port = dev->port; + struct mvs_slot_info *slot; + struct scatterlist *sg; + struct mvs_prd *buf_prd; + struct mvs_port *port = tei->port; + u32 tag = tei->tag; + u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); + void *buf_tmp; + u8 *buf_cmd, *buf_oaf; + dma_addr_t buf_tmp_dma; + u32 i, req_len, resp_len; + const u32 max_resp_len = SB_RFB_MAX; + + if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED) + return -EBUSY; + + slot = &mvi->slot_info[tag]; + slot->tx = mvi->tx_prod; + mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | + (TXQ_CMD_STP << TXQ_CMD_SHIFT) | + (sas_port->phy_mask << TXQ_PHY_SHIFT) | + (port->taskfileset << TXQ_SRS_SHIFT)); + + if (task->ata_task.use_ncq) + flags |= MCH_FPDMA; + if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { + if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI) + flags |= MCH_ATAPI; + } + + /* FIXME: fill in port multiplier number */ + + hdr->flags = cpu_to_le32(flags); + + /* FIXME: the low-order 5 bits hold the TAG when NCQ is enabled */ + if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags)) + task->ata_task.fis.sector_count |= hdr->tags << 3; + else + hdr->tags = cpu_to_le32(tag); + hdr->data_len = cpu_to_le32(task->total_xfer_len); + + /* + * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs + */ + + /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */ + buf_cmd = buf_tmp = slot->buf; + buf_tmp_dma = slot->buf_dma; + + hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_ATA_CMD_SZ; + buf_tmp_dma += MVS_ATA_CMD_SZ; +#if _MV_DUMP + slot->cmd_size = MVS_ATA_CMD_SZ; +#endif + + /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ + /* used for STP. unused for SATA? */ + buf_oaf = buf_tmp; + hdr->open_frame = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_OAF_SZ; + buf_tmp_dma += MVS_OAF_SZ; + + /* region 3: PRD table ********************************************* */ + buf_prd = buf_tmp; + if (tei->n_elem) + hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); + else + hdr->prd_tbl = 0; + + i = sizeof(struct mvs_prd) * tei->n_elem; + buf_tmp += i; + buf_tmp_dma += i; + + /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ + /* FIXME: probably unused, for SATA.
kept here just in case + * we get a STP/SATA error information record + */ + slot->response = buf_tmp; + hdr->status_buf = cpu_to_le64(buf_tmp_dma); + + req_len = sizeof(struct host_to_dev_fis); + resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - + sizeof(struct mvs_err_info) - i; + + /* request, response lengths */ + resp_len = min(resp_len, max_resp_len); + hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); + + task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ + /* fill in command FIS and ATAPI CDB */ + memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); + if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) + memcpy(buf_cmd + STP_ATAPI_CMD, + task->ata_task.atapi_packet, 16); + + /* generate open address frame hdr (first 12 bytes) */ + buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */ + buf_oaf[1] = task->dev->linkrate & 0xf; + *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); + memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); + + /* fill in PRD (scatter/gather) table, if any */ + for_each_sg(task->scatter, sg, tei->n_elem, i) { + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); + buf_prd->len = cpu_to_le32(sg_dma_len(sg)); + buf_prd++; + } + + return 0; +} + +static int mvs_task_prep_ssp(struct mvs_info *mvi, + struct mvs_task_exec_info *tei) +{ + struct sas_task *task = tei->task; + struct mvs_cmd_hdr *hdr = tei->hdr; + struct mvs_port *port = tei->port; + struct mvs_slot_info *slot; + struct scatterlist *sg; + struct mvs_prd *buf_prd; + struct ssp_frame_hdr *ssp_hdr; + void *buf_tmp; + u8 *buf_cmd, *buf_oaf, fburst = 0; + dma_addr_t buf_tmp_dma; + u32 flags; + u32 resp_len, req_len, i, tag = tei->tag; + const u32 max_resp_len = SB_RFB_MAX; + u8 phy_mask; + + slot = &mvi->slot_info[tag]; + + phy_mask = (port->wide_port_phymap) ? 
port->wide_port_phymap : + task->dev->port->phy_mask; + slot->tx = mvi->tx_prod; + mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | + (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | + (phy_mask << TXQ_PHY_SHIFT)); + + flags = MCH_RETRY; + if (task->ssp_task.enable_first_burst) { + flags |= MCH_FBURST; + fburst = (1 << 7); + } + hdr->flags = cpu_to_le32(flags | + (tei->n_elem << MCH_PRD_LEN_SHIFT) | + (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT)); + + hdr->tags = cpu_to_le32(tag); + hdr->data_len = cpu_to_le32(task->total_xfer_len); + + /* + * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs + */ + + /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ + buf_cmd = buf_tmp = slot->buf; + buf_tmp_dma = slot->buf_dma; + + hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_SSP_CMD_SZ; + buf_tmp_dma += MVS_SSP_CMD_SZ; +#if _MV_DUMP + slot->cmd_size = MVS_SSP_CMD_SZ; +#endif + + /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ + buf_oaf = buf_tmp; + hdr->open_frame = cpu_to_le64(buf_tmp_dma); + + buf_tmp += MVS_OAF_SZ; + buf_tmp_dma += MVS_OAF_SZ; + + /* region 3: PRD table ********************************************* */ + buf_prd = buf_tmp; + if (tei->n_elem) + hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); + else + hdr->prd_tbl = 0; + + i = sizeof(struct mvs_prd) * tei->n_elem; + buf_tmp += i; + buf_tmp_dma += i; + + /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ + slot->response = buf_tmp; + hdr->status_buf = cpu_to_le64(buf_tmp_dma); + + resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - + sizeof(struct mvs_err_info) - i; + resp_len = min(resp_len, max_resp_len); + + req_len = sizeof(struct ssp_frame_hdr) + 28; + + /* request, response lengths */ + hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); + + /* generate open address frame hdr (first 12 bytes) */ + buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */ + buf_oaf[1] = task->dev->linkrate & 0xf; + *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag); + memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE); + + /* fill in SSP frame header (Command Table.SSP frame header) */ + ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; + ssp_hdr->frame_type = SSP_COMMAND; + memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr, + HASHED_SAS_ADDR_SIZE); + memcpy(ssp_hdr->hashed_src_addr, + task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); + ssp_hdr->tag = cpu_to_be16(tag); + + /* fill in command frame IU */ + buf_cmd += sizeof(*ssp_hdr); + memcpy(buf_cmd, &task->ssp_task.LUN, 8); + buf_cmd[9] = fburst | task->ssp_task.task_attr | + (task->ssp_task.task_prio << 3); + memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); + + /* fill in PRD (scatter/gather) table, if any */ + for_each_sg(task->scatter, sg, tei->n_elem, i) { + buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); + buf_prd->len = cpu_to_le32(sg_dma_len(sg)); + buf_prd++; + } + + return 0; +} + +static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags) +{ + struct domain_device *dev = task->dev; + struct mvs_info *mvi = dev->port->ha->lldd_ha; + struct pci_dev *pdev = mvi->pdev; + void __iomem *regs = mvi->regs; + struct mvs_task_exec_info tei; + struct sas_task *t = task; + struct mvs_slot_info *slot; + u32 tag = 0xdeadbeef, rc, n_elem = 0; + unsigned long flags; + u32 n = num, pass = 0; + + spin_lock_irqsave(&mvi->lock, flags); + do { + dev = t->dev; + tei.port = &mvi->port[dev->port->id]; + + if (!tei.port->port_attached) { + if (sas_protocol_ata(t->task_proto)) { 
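+ /* no clean way to return an ATA task as undelivered here; give up on the whole batch */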
+ rc = SAS_PHY_DOWN; + goto out_done; + } else { + struct task_status_struct *ts = &t->task_status; + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + t->task_done(t); + if (n > 1) + t = list_entry(t->list.next, + struct sas_task, list); + continue; + } + } + + if (!sas_protocol_ata(t->task_proto)) { + if (t->num_scatter) { + n_elem = pci_map_sg(mvi->pdev, t->scatter, + t->num_scatter, + t->data_dir); + if (!n_elem) { + rc = -ENOMEM; + goto err_out; + } + } + } else { + n_elem = t->num_scatter; + } + + rc = mvs_tag_alloc(mvi, &tag); + if (rc) + goto err_out; + + slot = &mvi->slot_info[tag]; + t->lldd_task = NULL; + slot->n_elem = n_elem; + memset(slot->buf, 0, MVS_SLOT_BUF_SZ); + tei.task = t; + tei.hdr = &mvi->slot[tag]; + tei.tag = tag; + tei.n_elem = n_elem; + + switch (t->task_proto) { + case SAS_PROTOCOL_SMP: + rc = mvs_task_prep_smp(mvi, &tei); + break; + case SAS_PROTOCOL_SSP: + rc = mvs_task_prep_ssp(mvi, &tei); + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: + rc = mvs_task_prep_ata(mvi, &tei); + break; + default: + dev_printk(KERN_ERR, &pdev->dev, + "unknown sas_task proto: 0x%x\n", + t->task_proto); + rc = -EINVAL; + break; + } + + if (rc) + goto err_out_tag; + + slot->task = t; + slot->port = tei.port; + t->lldd_task = (void *) slot; + list_add_tail(&slot->list, &slot->port->list); + /* TODO: select normal or high priority */ + + spin_lock(&t->task_state_lock); + t->task_state_flags |= SAS_TASK_AT_INITIATOR; + spin_unlock(&t->task_state_lock); + + mvs_hba_memory_dump(mvi, tag, t->task_proto); + + ++pass; + mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); + if (n > 1) + t = list_entry(t->list.next, struct sas_task, list); + } while (--n); + + rc = 0; + goto out_done; + +err_out_tag: + mvs_tag_free(mvi, tag); +err_out: + dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc); + if (!sas_protocol_ata(t->task_proto)) + if (n_elem) + pci_unmap_sg(mvi->pdev, t->scatter, n_elem, + t->data_dir); +out_done: + if (pass) + mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); + spin_unlock_irqrestore(&mvi->lock, flags); + return rc; +} + +static int mvs_task_abort(struct sas_task *task) +{ + int rc; + unsigned long flags; + struct mvs_info *mvi = task->dev->port->ha->lldd_ha; + struct pci_dev *pdev = mvi->pdev; + int tag; + + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_STATE_DONE) { + rc = TMF_RESP_FUNC_COMPLETE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + goto out_done; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + + switch (task->task_proto) { + case SAS_PROTOCOL_SMP: + dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n"); + break; + case SAS_PROTOCOL_SSP: + dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n"); + break; + case SAS_PROTOCOL_SATA: + case SAS_PROTOCOL_STP: + case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{ + dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! 
\n"); +#if _MV_DUMP + dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n"); + mvs_hexdump(sizeof(struct host_to_dev_fis), + (void *)&task->ata_task.fis, 0); + dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n"); + mvs_hexdump(16, task->ata_task.atapi_packet, 0); +#endif + spin_lock_irqsave(&task->task_state_lock, flags); + if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { + /* TODO */ + ; + } + spin_unlock_irqrestore(&task->task_state_lock, flags); + break; + } + default: + break; + } + + if (mvs_find_tag(mvi, task, &tag)) { + spin_lock_irqsave(&mvi->lock, flags); + mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag); + spin_unlock_irqrestore(&mvi->lock, flags); + } + if (!mvs_task_exec(task, 1, GFP_ATOMIC)) + rc = TMF_RESP_FUNC_COMPLETE; + else + rc = TMF_RESP_FUNC_FAILED; +out_done: + return rc; +} + +static void mvs_free(struct mvs_info *mvi) +{ + int i; + + if (!mvi) + return; + + for (i = 0; i < MVS_SLOTS; i++) { + struct mvs_slot_info *slot = &mvi->slot_info[i]; + + if (slot->buf) + dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ, + slot->buf, slot->buf_dma); + } + + if (mvi->tx) + dma_free_coherent(&mvi->pdev->dev, + sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, + mvi->tx, mvi->tx_dma); + if (mvi->rx_fis) + dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ, + mvi->rx_fis, mvi->rx_fis_dma); + if (mvi->rx) + dma_free_coherent(&mvi->pdev->dev, + sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), + mvi->rx, mvi->rx_dma); + if (mvi->slot) + dma_free_coherent(&mvi->pdev->dev, + sizeof(*mvi->slot) * MVS_SLOTS, + mvi->slot, mvi->slot_dma); +#ifdef MVS_ENABLE_PERI + if (mvi->peri_regs) + iounmap(mvi->peri_regs); +#endif + if (mvi->regs) + iounmap(mvi->regs); + if (mvi->shost) + scsi_host_put(mvi->shost); + kfree(mvi->sas.sas_port); + kfree(mvi->sas.sas_phy); + kfree(mvi); +} + +/* FIXME: locking? */ +static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, + void *funcdata) +{ + struct mvs_info *mvi = sas_phy->ha->lldd_ha; + int rc = 0, phy_id = sas_phy->id; + u32 tmp; + + tmp = mvs_read_phy_ctl(mvi, phy_id); + + switch (func) { + case PHY_FUNC_SET_LINK_RATE:{ + struct sas_phy_linkrates *rates = funcdata; + u32 lrmin = 0, lrmax = 0; + + lrmin = (rates->minimum_linkrate << 8); + lrmax = (rates->maximum_linkrate << 12); + + if (lrmin) { + tmp &= ~(0xf << 8); + tmp |= lrmin; + } + if (lrmax) { + tmp &= ~(0xf << 12); + tmp |= lrmax; + } + mvs_write_phy_ctl(mvi, phy_id, tmp); + break; + } + + case PHY_FUNC_HARD_RESET: + if (tmp & PHY_RST_HARD) + break; + mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD); + break; + + case PHY_FUNC_LINK_RESET: + mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST); + break; + + case PHY_FUNC_DISABLE: + case PHY_FUNC_RELEASE_SPINUP_HOLD: + default: + rc = -EOPNOTSUPP; + } + + return rc; +} + +static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) +{ + struct mvs_phy *phy = &mvi->phy[phy_id]; + struct asd_sas_phy *sas_phy = &phy->sas_phy; + + sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 
1 : 0; + sas_phy->class = SAS; + sas_phy->iproto = SAS_PROTOCOL_ALL; + sas_phy->tproto = 0; + sas_phy->type = PHY_TYPE_PHYSICAL; + sas_phy->role = PHY_ROLE_INITIATOR; + sas_phy->oob_mode = OOB_NOT_CONNECTED; + sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; + + sas_phy->id = phy_id; + sas_phy->sas_addr = &mvi->sas_addr[0]; + sas_phy->frame_rcvd = &phy->frame_rcvd[0]; + sas_phy->ha = &mvi->sas; + sas_phy->lldd_phy = phy; +} + +static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct mvs_info *mvi; + unsigned long res_start, res_len, res_flag; + struct asd_sas_phy **arr_phy; + struct asd_sas_port **arr_port; + const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data]; + int i; + + /* + * alloc and init our per-HBA mvs_info struct + */ + + mvi = kzalloc(sizeof(*mvi), GFP_KERNEL); + if (!mvi) + return NULL; + + spin_lock_init(&mvi->lock); +#ifdef MVS_USE_TASKLET + tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi); +#endif + mvi->pdev = pdev; + mvi->chip = chip; + + if (pdev->device == 0x6440 && pdev->revision == 0) + mvi->flags |= MVF_PHY_PWR_FIX; + + /* + * alloc and init SCSI, SAS glue + */ + + mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); + if (!mvi->shost) + goto err_out; + + arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); + arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL); + if (!arr_phy || !arr_port) + goto err_out; + + for (i = 0; i < MVS_MAX_PHYS; i++) { + mvs_phy_init(mvi, i); + arr_phy[i] = &mvi->phy[i].sas_phy; + arr_port[i] = &mvi->port[i].sas_port; + mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED; + mvi->port[i].wide_port_phymap = 0; + mvi->port[i].port_attached = 0; + INIT_LIST_HEAD(&mvi->port[i].list); + } + + SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas; + mvi->shost->transportt = mvs_stt; + mvi->shost->max_id = 21; + mvi->shost->max_lun = ~0; + mvi->shost->max_channel = 0; + mvi->shost->max_cmd_len = 16; + + mvi->sas.sas_ha_name = DRV_NAME; + mvi->sas.dev = &pdev->dev; + mvi->sas.lldd_module = THIS_MODULE; + mvi->sas.sas_addr = &mvi->sas_addr[0]; + mvi->sas.sas_phy = arr_phy; + mvi->sas.sas_port = arr_port; + mvi->sas.num_phys = chip->n_phy; + mvi->sas.lldd_max_execute_num = 1; + mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE; + mvi->shost->can_queue = MVS_CAN_QUEUE; + mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys; + mvi->sas.lldd_ha = mvi; + mvi->sas.core.shost = mvi->shost; + + mvs_tag_init(mvi); + + /* + * ioremap main and peripheral registers + */ + +#ifdef MVS_ENABLE_PERI + res_start = pci_resource_start(pdev, 2); + res_len = pci_resource_len(pdev, 2); + if (!res_start || !res_len) + goto err_out; + + mvi->peri_regs = ioremap_nocache(res_start, res_len); + if (!mvi->peri_regs) + goto err_out; +#endif + + res_start = pci_resource_start(pdev, 4); + res_len = pci_resource_len(pdev, 4); + if (!res_start || !res_len) + goto err_out; + + res_flag = pci_resource_flags(pdev, 4); + if (res_flag & IORESOURCE_CACHEABLE) + mvi->regs = ioremap(res_start, res_len); + else + mvi->regs = ioremap_nocache(res_start, res_len); + + if (!mvi->regs) + goto err_out; + + /* + * alloc and init our DMA areas + */ + + mvi->tx = dma_alloc_coherent(&pdev->dev, + sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, + &mvi->tx_dma, GFP_KERNEL); + if (!mvi->tx) + goto err_out; + memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ); + + mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ, + &mvi->rx_fis_dma, GFP_KERNEL); + if (!mvi->rx_fis) + goto err_out; + memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); + + mvi->rx 
= dma_alloc_coherent(&pdev->dev, + sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), + &mvi->rx_dma, GFP_KERNEL); + if (!mvi->rx) + goto err_out; + memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1)); + + mvi->rx[0] = cpu_to_le32(0xfff); + mvi->rx_cons = 0xfff; + + mvi->slot = dma_alloc_coherent(&pdev->dev, + sizeof(*mvi->slot) * MVS_SLOTS, + &mvi->slot_dma, GFP_KERNEL); + if (!mvi->slot) + goto err_out; + memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS); + + for (i = 0; i < MVS_SLOTS; i++) { + struct mvs_slot_info *slot = &mvi->slot_info[i]; + + slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ, + &slot->buf_dma, GFP_KERNEL); + if (!slot->buf) + goto err_out; + memset(slot->buf, 0, MVS_SLOT_BUF_SZ); + } + + /* finally, read NVRAM to get our SAS address */ + if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8)) + goto err_out; + return mvi; + +err_out: + mvs_free(mvi); + return NULL; +} + +static u32 mvs_cr32(void __iomem *regs, u32 addr) +{ + mw32(CMD_ADDR, addr); + return mr32(CMD_DATA); +} + +static void mvs_cw32(void __iomem *regs, u32 addr, u32 val) +{ + mw32(CMD_ADDR, addr); + mw32(CMD_DATA, val); +} + +static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port) +{ + void __iomem *regs = mvi->regs; + return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4): + mr32(P4_SER_CTLSTAT + (port - 4) * 4); +} + +static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val) +{ + void __iomem *regs = mvi->regs; + if (port < 4) + mw32(P0_SER_CTLSTAT + port * 4, val); + else + mw32(P4_SER_CTLSTAT + (port - 4) * 4, val); +} + +static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port) +{ + void __iomem *regs = mvi->regs + off; + void __iomem *regs2 = mvi->regs + off2; + return (port < 4)?readl(regs + port * 8): + readl(regs2 + (port - 4) * 8); +} + +static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2, + u32 port, u32 val) +{ + void __iomem *regs = mvi->regs + off; + void __iomem *regs2 = mvi->regs + off2; + if (port < 4) + writel(val, regs + port * 8); + else + writel(val, regs2 + (port - 4) * 8); +} + +static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port); +} + +static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val); +} + +static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr) +{ + mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr); +} + +static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port); +} + +static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val); +} + +static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr) +{ + mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr); +} + +static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port); +} + +static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val); +} + +static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port) +{ + return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port); +} + +static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val) +{ + mvs_write_port(mvi, MVS_P0_INT_MASK, 
MVS_P4_INT_MASK, port, val); +} + +static void __devinit mvs_phy_hacks(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + /* workaround for SATA R-ERR, to ignore phy glitch */ + tmp = mvs_cr32(regs, CMD_PHY_TIMER); + tmp &= ~(1 << 9); + tmp |= (1 << 10); + mvs_cw32(regs, CMD_PHY_TIMER, tmp); + + /* enable retry 127 times */ + mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f); + + /* extend open frame timeout to max */ + tmp = mvs_cr32(regs, CMD_SAS_CTL0); + tmp &= ~0xffff; + tmp |= 0x3fff; + mvs_cw32(regs, CMD_SAS_CTL0, tmp); + + /* workaround for WDTIMEOUT, set to 550 ms */ + mvs_cw32(regs, CMD_WD_TIMER, 0x86470); + + /* not to halt for different port op during wideport link change */ + mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d); + + /* workaround for Seagate disk not-found OOB sequence, recv + * COMINIT before sending out COMWAKE */ + tmp = mvs_cr32(regs, CMD_PHY_MODE_21); + tmp &= 0x0000ffff; + tmp |= 0x00fa0000; + mvs_cw32(regs, CMD_PHY_MODE_21, tmp); + + tmp = mvs_cr32(regs, CMD_PHY_TIMER); + tmp &= 0x1fffffff; + tmp |= (2U << 29); /* 8 ms retry */ + mvs_cw32(regs, CMD_PHY_TIMER, tmp); + + /* TEST - for phy decoding error, adjust voltage levels */ + mw32(P0_VSR_ADDR + 0, 0x8); + mw32(P0_VSR_DATA + 0, 0x2F0); + + mw32(P0_VSR_ADDR + 8, 0x8); + mw32(P0_VSR_DATA + 8, 0x2F0); + + mw32(P0_VSR_ADDR + 16, 0x8); + mw32(P0_VSR_DATA + 16, 0x2F0); + + mw32(P0_VSR_ADDR + 24, 0x8); + mw32(P0_VSR_DATA + 24, 0x2F0); + +} + +static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId) +{ + void __iomem *regs = mvi->regs; + u32 tmp; + + tmp = mr32(PCS); + if (mvi->chip->n_phy <= 4) + tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT); + else + tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2); + mw32(PCS, tmp); +} + +static void mvs_detect_porttype(struct mvs_info *mvi, int i) +{ + void __iomem *regs = mvi->regs; + u32 reg; + struct mvs_phy *phy = &mvi->phy[i]; + + /* TODO check & save device type */ + reg = mr32(GBL_PORT_TYPE); + + if (reg & MODE_SAS_SATA & (1 << i)) + phy->phy_type |= PORT_TYPE_SAS; + else + phy->phy_type |= PORT_TYPE_SATA; +} + +static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) +{ + u32 *s = (u32 *) buf; + + if (!s) + return NULL; + + mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); + s[3] = mvs_read_port_cfg_data(mvi, i); + + mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); + s[2] = mvs_read_port_cfg_data(mvi, i); + + mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); + s[1] = mvs_read_port_cfg_data(mvi, i); + + mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); + s[0] = mvs_read_port_cfg_data(mvi, i); + + return (void *)s; +} + +static u32 mvs_is_sig_fis_received(u32 irq_status) +{ + return irq_status & PHYEV_SIG_FIS; +} + +static void mvs_update_wideport(struct mvs_info *mvi, int i) +{ + struct mvs_phy *phy = &mvi->phy[i]; + struct mvs_port *port = phy->port; + int j, no; + + for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy) + if (no & 1) { + mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); + mvs_write_port_cfg_data(mvi, no, + port->wide_port_phymap); + } else { + mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT); + mvs_write_port_cfg_data(mvi, no, 0); + } +} + +static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) +{ + u32 tmp; + struct mvs_phy *phy = &mvi->phy[i]; + struct mvs_port *port = phy->port; + + tmp = mvs_read_phy_ctl(mvi, i); + + if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { + if (!port) + phy->phy_attached = 1; + return tmp; + } + + if (port) { + if (phy->phy_type & PORT_TYPE_SAS) { + port->wide_port_phymap &= ~(1U
<< i); + if (!port->wide_port_phymap) + port->port_attached = 0; + mvs_update_wideport(mvi, i); + } else if (phy->phy_type & PORT_TYPE_SATA) + port->port_attached = 0; + mvs_free_reg_set(mvi, phy->port); + phy->port = NULL; + phy->phy_attached = 0; + phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); + } + return 0; +} + +static void mvs_update_phyinfo(struct mvs_info *mvi, int i, + int get_st) +{ + struct mvs_phy *phy = &mvi->phy[i]; + struct pci_dev *pdev = mvi->pdev; + u32 tmp; + u64 tmp64; + + mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); + phy->dev_info = mvs_read_port_cfg_data(mvi, i); + + mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); + phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32; + + mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); + phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); + + if (get_st) { + phy->irq_status = mvs_read_port_irq_stat(mvi, i); + phy->phy_status = mvs_is_phy_ready(mvi, i); + } + + if (phy->phy_status) { + u32 phy_st; + struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i]; + + mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); + phy_st = mvs_read_port_cfg_data(mvi, i); + + sas_phy->linkrate = + (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; + phy->minimum_linkrate = + (phy->phy_status & + PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8; + phy->maximum_linkrate = + (phy->phy_status & + PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12; + + if (phy->phy_type & PORT_TYPE_SAS) { + /* Update attached_sas_addr */ + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); + phy->att_dev_sas_addr = + (u64) mvs_read_port_cfg_data(mvi, i) << 32; + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); + phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); + mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); + phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); + phy->identify.device_type = + phy->att_dev_info & PORT_DEV_TYPE_MASK; + + if (phy->identify.device_type == SAS_END_DEV) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != NO_DEVICE) + phy->identify.target_port_protocols = + SAS_PROTOCOL_SMP; + if (phy_st & PHY_OOB_DTCTD) + sas_phy->oob_mode = SAS_OOB_MODE; + phy->frame_rcvd_size = + sizeof(struct sas_identify_frame); + } else if (phy->phy_type & PORT_TYPE_SATA) { + phy->identify.target_port_protocols = SAS_PROTOCOL_STP; + if (mvs_is_sig_fis_received(phy->irq_status)) { + phy->att_dev_sas_addr = i; /* temp */ + if (phy_st & PHY_OOB_DTCTD) + sas_phy->oob_mode = SATA_OOB_MODE; + phy->frame_rcvd_size = + sizeof(struct dev_to_host_fis); + mvs_get_d2h_reg(mvi, i, + (void *)sas_phy->frame_rcvd); + } else { + dev_printk(KERN_DEBUG, &pdev->dev, + "No sig fis\n"); + phy->phy_type &= ~(PORT_TYPE_SATA); + goto out_done; + } + } + tmp64 = cpu_to_be64(phy->att_dev_sas_addr); + memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE); + + dev_printk(KERN_DEBUG, &pdev->dev, + "phy[%d] Get Attached Address 0x%llX," + " SAS Address 0x%llX\n", + i, + (unsigned long long)phy->att_dev_sas_addr, + (unsigned long long)phy->dev_sas_addr); + dev_printk(KERN_DEBUG, &pdev->dev, + "Rate = %x, type = %d\n", + sas_phy->linkrate, phy->phy_type); + + /* workaround for HW phy decoding error on 1.5g disk drive */ + mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); + tmp = mvs_read_port_vsr_data(mvi, i); + if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> + PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == + SAS_LINK_RATE_1_5_GBPS) + tmp &= ~PHY_MODE6_LATECLK; + else + tmp |= PHY_MODE6_LATECLK; + mvs_write_port_vsr_data(mvi, i, tmp); + + } +out_done: + if (get_st) + mvs_write_port_irq_stat(mvi, i, phy->irq_status); +} + +static void mvs_port_formed(struct asd_sas_phy *sas_phy) +{ + struct sas_ha_struct *sas_ha = sas_phy->ha; + struct mvs_info *mvi = sas_ha->lldd_ha; + struct asd_sas_port *sas_port = sas_phy->port; + struct mvs_phy *phy = sas_phy->lldd_phy; + struct mvs_port *port = &mvi->port[sas_port->id]; + unsigned long flags; + + spin_lock_irqsave(&mvi->lock, flags); + port->port_attached = 1; + phy->port = port; + port->taskfileset = MVS_ID_NOT_MAPPED; + if (phy->phy_type & PORT_TYPE_SAS) { + port->wide_port_phymap = sas_port->phy_mask; + mvs_update_wideport(mvi, sas_phy->id); + } + spin_unlock_irqrestore(&mvi->lock, flags); +} + +static int mvs_I_T_nexus_reset(struct domain_device *dev) +{ + return TMF_RESP_FUNC_FAILED; +} + +static int __devinit mvs_hw_init(struct mvs_info *mvi) +{ + void __iomem *regs = mvi->regs; + int i; + u32 tmp, cctl; + + /* make sure interrupts are masked immediately (paranoia) */ + mw32(GBL_CTL, 0); + tmp = mr32(GBL_CTL); + + /* Reset Controller */ + if (!(tmp & HBA_RST)) { + if (mvi->flags & MVF_PHY_PWR_FIX) { + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); + tmp &= ~PCTL_PWR_ON; + tmp |= PCTL_OFF; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); + + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); + tmp &= ~PCTL_PWR_ON; + tmp |= PCTL_OFF; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); + } + + /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */ + mw32_f(GBL_CTL, HBA_RST); + } + + /* wait for reset to finish; timeout is just a guess */ + i = 1000; + while (i-- > 0) { + msleep(10); + + if (!(mr32(GBL_CTL) & HBA_RST)) + break; + } + if (mr32(GBL_CTL) & HBA_RST) { + dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n"); + return -EBUSY; + } + + /* Init Chip */ + /* make sure RST is set; HBA_RST /should/ have done that for us */ + cctl = mr32(CTL); + if (cctl & CCTL_RST) + cctl &= ~CCTL_RST; + else + mw32_f(CTL, cctl | CCTL_RST);
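The reset sequence in mvs_hw_init() above is a poll-with-bounded-wait on a self-clearing bit, with (as the comment admits) a guessed timeout. A minimal sketch of that pattern, assuming the mr32() and msleep() helpers already used in this file; mvs_wait_reset_clear is a hypothetical helper, not part of this patch:

	/* Hypothetical helper: wait for a self-clearing reset bit in
	 * GBL_CTL to drop, giving up after 'tries' 10 ms intervals. */
	static int mvs_wait_reset_clear(void __iomem *regs, u32 bit, int tries)
	{
		while (tries-- > 0) {
			if (!(mr32(GBL_CTL) & bit))
				return 0;	/* reset completed */
			msleep(10);
		}
		return -EBUSY;			/* bit still asserted */
	}

Folding the final re-read into the helper's return value, as sketched, avoids the separate post-loop check the open-coded version needs.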
+ + /* write to device control _AND_ device status register? - A.C. */ + pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp); + tmp &= ~PRD_REQ_MASK; + tmp |= PRD_REQ_SIZE; + pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp); + + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); + tmp |= PCTL_PWR_ON; + tmp &= ~PCTL_OFF; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); + + pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); + tmp |= PCTL_PWR_ON; + tmp &= ~PCTL_OFF; + pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); + + mw32_f(CTL, cctl); + + /* reset control */ + mw32(PCS, 0); /* MVS_PCS */ + + mvs_phy_hacks(mvi); + + mw32(CMD_LIST_LO, mvi->slot_dma); + mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); + + mw32(RX_FIS_LO, mvi->rx_fis_dma); + mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); + + mw32(TX_CFG, MVS_CHIP_SLOT_SZ); + mw32(TX_LO, mvi->tx_dma); + mw32(TX_HI, (mvi->tx_dma >> 16) >> 16); + + mw32(RX_CFG, MVS_RX_RING_SZ); + mw32(RX_LO, mvi->rx_dma); + mw32(RX_HI, (mvi->rx_dma >> 16) >> 16); + + /* enable auto port detection */ + mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN); + msleep(1100); + /* init and reset phys */ + for (i = 0; i < mvi->chip->n_phy; i++) { + u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]); + u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]); + + mvs_detect_porttype(mvi, i); + + /* set phy local SAS address */ + mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO); + mvs_write_port_cfg_data(mvi, i, lo); + mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI); + mvs_write_port_cfg_data(mvi, i, hi); + + /* reset phy */ + tmp = mvs_read_phy_ctl(mvi, i); + tmp |= PHY_RST; + mvs_write_phy_ctl(mvi, i, tmp); + } + + msleep(100); + + for (i = 0; i < mvi->chip->n_phy; i++) { + /* clear phy int status */ + tmp = mvs_read_port_irq_stat(mvi, i); + tmp &= ~PHYEV_SIG_FIS; + mvs_write_port_irq_stat(mvi, i, tmp); + + /* set phy int mask */ + tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS | + PHYEV_ID_DONE | PHYEV_DEC_ERR; + mvs_write_port_irq_mask(mvi, i, tmp); + + msleep(100); + mvs_update_phyinfo(mvi, i, 1); + mvs_enable_xmt(mvi, i); + } + + /* FIXME: update wide port bitmaps */ + + /* little endian for open address and command table, etc. */ + /* A.C. + * it seems that (from the spec) turning on big-endian won't + * do us any good on big-endian machines, need further confirmation + */ + cctl = mr32(CTL); + cctl |= CCTL_ENDIAN_CMD; + cctl |= CCTL_ENDIAN_DATA; + cctl &= ~CCTL_ENDIAN_OPEN; + cctl |= CCTL_ENDIAN_RSP; + mw32_f(CTL, cctl); + + /* reset CMD queue */ + tmp = mr32(PCS); + tmp |= PCS_CMD_RST; + mw32(PCS, tmp);
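One idiom worth noting in the ring setup above: the high halves of the DMA base addresses are written as (x >> 16) >> 16 rather than x >> 32. dma_addr_t may be only 32 bits wide, and a shift by the full width of the type is undefined C, while two 16-bit shifts are well defined and simply yield 0. A minimal sketch of the idiom (write_ring_addr is a hypothetical helper, not part of this patch):

	/* Hypothetical helper: program a ring base into a LO/HI register
	 * pair when dma_addr_t may be either 32 or 64 bits wide. */
	static void write_ring_addr(void __iomem *lo, void __iomem *hi,
				    dma_addr_t addr)
	{
		writel((u32)addr, lo);			/* low 32 bits */
		writel((u32)((addr >> 16) >> 16), hi);	/* high 32 bits, or 0 */
	}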
+ /* interrupt coalescing may cause a missed HW interrupt in some cases, + * and the max count is 0x1ff, while our max slot is 0x200, + * it will make count 0. + */ + tmp = 0; + mw32(INT_COAL, tmp); + + tmp = 0x100; + mw32(INT_COAL_TMOUT, tmp); + + /* ladies and gentlemen, start your engines */ + mw32(TX_CFG, 0); + mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); + mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN); + /* enable CMD/CMPL_Q/RESP mode */ + mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN); + + /* enable completion queue interrupt */ + tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS); + mw32(INT_MASK, tmp); + + /* Enable SRS interrupt */ + mw32(INT_MASK_SRS, 0xFF); + return 0; +} + +static void __devinit mvs_print_info(struct mvs_info *mvi) +{ + struct pci_dev *pdev = mvi->pdev; + static int printed_version; + + if (!printed_version++) + dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n"); + + dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n", + mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr)); +} + +static int __devinit mvs_pci_init(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int rc; + struct mvs_info *mvi; + irq_handler_t irq_handler = mvs_interrupt; + + rc = pci_enable_device(pdev); + if (rc) + return rc; + + pci_set_master(pdev); + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out_disable; + + rc = pci_go_64(pdev); + if (rc) + goto err_out_regions; + + mvi = mvs_alloc(pdev, ent); + if (!mvi) { + rc = -ENOMEM; + goto err_out_regions; + } + + rc = mvs_hw_init(mvi); + if (rc) + goto err_out_mvi; + +#ifndef MVS_DISABLE_MSI + if (!pci_enable_msi(pdev)) { + u32 tmp; + void __iomem *regs = mvi->regs; + mvi->flags |= MVF_MSI; + irq_handler = mvs_msi_interrupt; + tmp = mr32(PCS); + mw32(PCS, tmp | PCS_SELF_CLEAR); + } +#endif + + rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi); + if (rc) + goto err_out_msi; + + rc = scsi_add_host(mvi->shost, &pdev->dev); + if (rc) + goto err_out_irq; + + rc = sas_register_ha(&mvi->sas); + if (rc) + goto err_out_shost; + + pci_set_drvdata(pdev, mvi); + + mvs_print_info(mvi); + + mvs_hba_interrupt_enable(mvi); + + scsi_scan_host(mvi->shost); + + return 0; + +err_out_shost: + scsi_remove_host(mvi->shost); +err_out_irq: + free_irq(pdev->irq, mvi); +err_out_msi: + if (mvi->flags & MVF_MSI) + pci_disable_msi(pdev); +err_out_mvi: + mvs_free(mvi); +err_out_regions: + pci_release_regions(pdev); +err_out_disable: + pci_disable_device(pdev); + return rc; +} + +static void __devexit mvs_pci_remove(struct pci_dev *pdev) +{ + struct mvs_info *mvi = pci_get_drvdata(pdev); + + pci_set_drvdata(pdev, NULL); + + if (mvi) { + sas_unregister_ha(&mvi->sas); + mvs_hba_interrupt_disable(mvi); + sas_remove_host(mvi->shost); + scsi_remove_host(mvi->shost); + + free_irq(pdev->irq, mvi); + if (mvi->flags & MVF_MSI) + pci_disable_msi(pdev); + mvs_free(mvi); + pci_release_regions(pdev); + } + pci_disable_device(pdev); +} + +static struct sas_domain_function_template mvs_transport_ops = { + .lldd_execute_task = mvs_task_exec, + .lldd_control_phy = mvs_phy_control, + .lldd_abort_task = mvs_task_abort, + .lldd_port_formed = mvs_port_formed, + .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset, +}; + +static struct pci_device_id __devinitdata mvs_pci_table[] = { + { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 }, + { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 }, + { + .vendor = PCI_VENDOR_ID_MARVELL, + .device = 0x6440, + .subvendor = PCI_ANY_ID, + .subdevice = 0x6480, + .class = 0, + .class_mask = 0, + .driver_data = chip_6480, + }, + { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 }, + { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 }, + + { } /* terminate list */ +}; + +static struct pci_driver
mvs_pci_driver = { + .name = DRV_NAME, + .id_table = mvs_pci_table, + .probe = mvs_pci_init, + .remove = __devexit_p(mvs_pci_remove), +}; + +static int __init mvs_init(void) +{ + int rc; + + mvs_stt = sas_domain_attach_transport(&mvs_transport_ops); + if (!mvs_stt) + return -ENOMEM; + + rc = pci_register_driver(&mvs_pci_driver); + if (rc) + goto err_out; + + return 0; + +err_out: + sas_release_transport(mvs_stt); + return rc; +} + +static void __exit mvs_exit(void) +{ + pci_unregister_driver(&mvs_pci_driver); + sas_release_transport(mvs_stt); +} + +module_init(mvs_init); +module_exit(mvs_exit); + +MODULE_AUTHOR("Jeff Garzik "); +MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, mvs_pci_table); diff --git a/trunk/drivers/scsi/mvsas/Kconfig b/trunk/drivers/scsi/mvsas/Kconfig deleted file mode 100644 index 6de7af27e507..000000000000 --- a/trunk/drivers/scsi/mvsas/Kconfig +++ /dev/null @@ -1,42 +0,0 @@ -# -# Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver. -# -# Copyright 2007 Red Hat, Inc. -# Copyright 2008 Marvell. -# -# This file is licensed under GPLv2. -# -# This file is part of the 88SE64XX/88SE94XX driver. -# -# The 88SE64XX/88SE94XX driver is free software; you can redistribute -# it and/or modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; version 2 of the -# License. -# -# The 88SE64XX/88SE94XX driver is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with 88SE64XX/88SE94XX Driver; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# -# - -config SCSI_MVSAS - tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support" - depends on PCI - select SCSI_SAS_LIBSAS - select FW_LOADER - help - This driver supports Marvell's SAS/SATA 3Gb/s PCI-E 88SE64XX and 6Gb/s - PCI-E 88SE94XX chip based host adapters. - -config SCSI_MVSAS_DEBUG - bool "Compile in debug mode" - default y - depends on SCSI_MVSAS - help - Compiles the 88SE64XX/88SE94XX driver in debug mode. In debug mode, - the driver prints some messages to the console. diff --git a/trunk/drivers/scsi/mvsas/Makefile b/trunk/drivers/scsi/mvsas/Makefile deleted file mode 100644 index 52ac4264677d..000000000000 --- a/trunk/drivers/scsi/mvsas/Makefile +++ /dev/null @@ -1,32 +0,0 @@ -# -# Makefile for Marvell 88SE64xx/88SE84xx SAS/SATA driver. -# -# Copyright 2007 Red Hat, Inc. -# Copyright 2008 Marvell. -# -# This file is licensed under GPLv2. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation; version 2 of the -# License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 -# USA - -ifeq ($(CONFIG_SCSI_MVSAS_DEBUG),y) - EXTRA_CFLAGS += -DMV_DEBUG -endif - -obj-$(CONFIG_SCSI_MVSAS) += mvsas.o -mvsas-y += mv_init.o \ - mv_sas.o \ - mv_64xx.o \ - mv_94xx.o diff --git a/trunk/drivers/scsi/mvsas/mv_64xx.c b/trunk/drivers/scsi/mvsas/mv_64xx.c deleted file mode 100644 index 10a5077b6aed..000000000000 --- a/trunk/drivers/scsi/mvsas/mv_64xx.c +++ /dev/null @@ -1,793 +0,0 @@ -/* - * Marvell 88SE64xx hardware specific - * - * Copyright 2007 Red Hat, Inc. - * Copyright 2008 Marvell. - * - * This file is licensed under GPLv2. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; version 2 of the - * License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA -*/ - -#include "mv_sas.h" -#include "mv_64xx.h" -#include "mv_chips.h" - -static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i) -{ - void __iomem *regs = mvi->regs; - u32 reg; - struct mvs_phy *phy = &mvi->phy[i]; - - /* TODO check & save device type */ - reg = mr32(MVS_GBL_PORT_TYPE); - phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); - if (reg & MODE_SAS_SATA & (1 << i)) - phy->phy_type |= PORT_TYPE_SAS; - else - phy->phy_type |= PORT_TYPE_SATA; -} - -static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - - tmp = mr32(MVS_PCS); - if (mvi->chip->n_phy <= 4) - tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT); - else - tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2); - mw32(MVS_PCS, tmp); -} - -static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - - mvs_phy_hacks(mvi); - - if (!(mvi->flags & MVF_FLAG_SOC)) { - /* TEST - for phy decoding error, adjust voltage levels */ - mw32(MVS_P0_VSR_ADDR + 0, 0x8); - mw32(MVS_P0_VSR_DATA + 0, 0x2F0); - - mw32(MVS_P0_VSR_ADDR + 8, 0x8); - mw32(MVS_P0_VSR_DATA + 8, 0x2F0); - - mw32(MVS_P0_VSR_ADDR + 16, 0x8); - mw32(MVS_P0_VSR_DATA + 16, 0x2F0); - - mw32(MVS_P0_VSR_ADDR + 24, 0x8); - mw32(MVS_P0_VSR_DATA + 24, 0x2F0); - } else { - int i; - /* disable auto port detection */ - mw32(MVS_GBL_PORT_TYPE, 0); - for (i = 0; i < mvi->chip->n_phy; i++) { - mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7); - mvs_write_port_vsr_data(mvi, i, 0x90000000); - mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9); - mvs_write_port_vsr_data(mvi, i, 0x50f2); - mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11); - mvs_write_port_vsr_data(mvi, i, 0x0e); - } - } -} - -static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id) -{ - void __iomem *regs = mvi->regs; - u32 reg, tmp; - - if (!(mvi->flags & MVF_FLAG_SOC)) { - if (phy_id < 4) - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, ®); - else - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, ®); - - } else - reg = mr32(MVS_PHY_CTL); - - tmp = reg; - if (phy_id < 4) - tmp |= (1U << phy_id) << 
PCTL_LINK_OFFS; - else - tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS; - - if (!(mvi->flags & MVF_FLAG_SOC)) { - if (phy_id < 4) { - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); - mdelay(10); - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg); - } else { - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); - mdelay(10); - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg); - } - } else { - mw32(MVS_PHY_CTL, tmp); - mdelay(10); - mw32(MVS_PHY_CTL, reg); - } -} - -static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) -{ - u32 tmp; - tmp = mvs_read_port_irq_stat(mvi, phy_id); - tmp &= ~PHYEV_RDY_CH; - mvs_write_port_irq_stat(mvi, phy_id, tmp); - tmp = mvs_read_phy_ctl(mvi, phy_id); - if (hard) - tmp |= PHY_RST_HARD; - else - tmp |= PHY_RST; - mvs_write_phy_ctl(mvi, phy_id, tmp); - if (hard) { - do { - tmp = mvs_read_phy_ctl(mvi, phy_id); - } while (tmp & PHY_RST_HARD); - } -} - -static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - int i; - - /* make sure interrupts are masked immediately (paranoia) */ - mw32(MVS_GBL_CTL, 0); - tmp = mr32(MVS_GBL_CTL); - - /* Reset Controller */ - if (!(tmp & HBA_RST)) { - if (mvi->flags & MVF_PHY_PWR_FIX) { - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); - tmp &= ~PCTL_PWR_OFF; - tmp |= PCTL_PHY_DSBL; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); - - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); - tmp &= ~PCTL_PWR_OFF; - tmp |= PCTL_PHY_DSBL; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); - } - } - - /* make sure interrupts are masked immediately (paranoia) */ - mw32(MVS_GBL_CTL, 0); - tmp = mr32(MVS_GBL_CTL); - - /* Reset Controller */ - if (!(tmp & HBA_RST)) { - /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */ - mw32_f(MVS_GBL_CTL, HBA_RST); - } - - /* wait for reset to finish; timeout is just a guess */ - i = 1000; - while (i-- > 0) { - msleep(10); - - if (!(mr32(MVS_GBL_CTL) & HBA_RST)) - break; - } - if (mr32(MVS_GBL_CTL) & HBA_RST) { - dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n"); - return -EBUSY; - } - return 0; -} - -static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - if (!(mvi->flags & MVF_FLAG_SOC)) { - u32 offs; - if (phy_id < 4) - offs = PCR_PHY_CTL; - else { - offs = PCR_PHY_CTL2; - phy_id -= 4; - } - pci_read_config_dword(mvi->pdev, offs, &tmp); - tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id); - pci_write_config_dword(mvi->pdev, offs, tmp); - } else { - tmp = mr32(MVS_PHY_CTL); - tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id); - mw32(MVS_PHY_CTL, tmp); - } -} - -static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - if (!(mvi->flags & MVF_FLAG_SOC)) { - u32 offs; - if (phy_id < 4) - offs = PCR_PHY_CTL; - else { - offs = PCR_PHY_CTL2; - phy_id -= 4; - } - pci_read_config_dword(mvi->pdev, offs, &tmp); - tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id)); - pci_write_config_dword(mvi->pdev, offs, tmp); - } else { - tmp = mr32(MVS_PHY_CTL); - tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id)); - mw32(MVS_PHY_CTL, tmp); - } -} - -static int __devinit mvs_64xx_init(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - int i; - u32 tmp, cctl; - - if (mvi->pdev && mvi->pdev->revision == 0) - mvi->flags |= MVF_PHY_PWR_FIX; - if (!(mvi->flags & MVF_FLAG_SOC)) { - mvs_show_pcie_usage(mvi); - tmp = mvs_64xx_chip_reset(mvi); - if (tmp) - return tmp; - } else { - tmp = mr32(MVS_PHY_CTL); - tmp &= 
~PCTL_PWR_OFF; - tmp |= PCTL_PHY_DSBL; - mw32(MVS_PHY_CTL, tmp); - } - - /* Init Chip */ - /* make sure RST is set; HBA_RST /should/ have done that for us */ - cctl = mr32(MVS_CTL) & 0xFFFF; - if (cctl & CCTL_RST) - cctl &= ~CCTL_RST; - else - mw32_f(MVS_CTL, cctl | CCTL_RST); - - if (!(mvi->flags & MVF_FLAG_SOC)) { - /* write to device control _AND_ device status register */ - pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp); - tmp &= ~PRD_REQ_MASK; - tmp |= PRD_REQ_SIZE; - pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp); - - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp); - tmp &= ~PCTL_PWR_OFF; - tmp &= ~PCTL_PHY_DSBL; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp); - - pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp); - tmp &= PCTL_PWR_OFF; - tmp &= ~PCTL_PHY_DSBL; - pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp); - } else { - tmp = mr32(MVS_PHY_CTL); - tmp &= ~PCTL_PWR_OFF; - tmp |= PCTL_COM_ON; - tmp &= ~PCTL_PHY_DSBL; - tmp |= PCTL_LINK_RST; - mw32(MVS_PHY_CTL, tmp); - msleep(100); - tmp &= ~PCTL_LINK_RST; - mw32(MVS_PHY_CTL, tmp); - msleep(100); - } - - /* reset control */ - mw32(MVS_PCS, 0); /* MVS_PCS */ - /* init phys */ - mvs_64xx_phy_hacks(mvi); - - /* enable auto port detection */ - mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN); - - mw32(MVS_CMD_LIST_LO, mvi->slot_dma); - mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); - - mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma); - mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); - - mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ); - mw32(MVS_TX_LO, mvi->tx_dma); - mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16); - - mw32(MVS_RX_CFG, MVS_RX_RING_SZ); - mw32(MVS_RX_LO, mvi->rx_dma); - mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16); - - for (i = 0; i < mvi->chip->n_phy; i++) { - /* set phy local SAS address */ - /* should set little endian SAS address to 64xx chip */ - mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI, - cpu_to_be64(mvi->phy[i].dev_sas_addr)); - - mvs_64xx_enable_xmt(mvi, i); - - mvs_64xx_phy_reset(mvi, i, 1); - msleep(500); - mvs_64xx_detect_porttype(mvi, i); - } - if (mvi->flags & MVF_FLAG_SOC) { - /* set select registers */ - writel(0x0E008000, regs + 0x000); - writel(0x59000008, regs + 0x004); - writel(0x20, regs + 0x008); - writel(0x20, regs + 0x00c); - writel(0x20, regs + 0x010); - writel(0x20, regs + 0x014); - writel(0x20, regs + 0x018); - writel(0x20, regs + 0x01c); - } - for (i = 0; i < mvi->chip->n_phy; i++) { - /* clear phy int status */ - tmp = mvs_read_port_irq_stat(mvi, i); - tmp &= ~PHYEV_SIG_FIS; - mvs_write_port_irq_stat(mvi, i, tmp); - - /* set phy int mask */ - tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS | - PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR | - PHYEV_DEC_ERR; - mvs_write_port_irq_mask(mvi, i, tmp); - - msleep(100); - mvs_update_phyinfo(mvi, i, 1); - } - - /* FIXME: update wide port bitmaps */ - - /* little endian for open address and command table, etc. */ - /* - * it seems that ( from the spec ) turning on big-endian won't - * do us any good on big-endian machines, need further confirmation - */ - cctl = mr32(MVS_CTL); - cctl |= CCTL_ENDIAN_CMD; - cctl |= CCTL_ENDIAN_DATA; - cctl &= ~CCTL_ENDIAN_OPEN; - cctl |= CCTL_ENDIAN_RSP; - mw32_f(MVS_CTL, cctl); - - /* reset CMD queue */ - tmp = mr32(MVS_PCS); - tmp |= PCS_CMD_RST; - mw32(MVS_PCS, tmp); - /* interrupt coalescing may cause missing HW interrput in some case, - * and the max count is 0x1ff, while our max slot is 0x200, - * it will make count 0. 
- */ - tmp = 0; - mw32(MVS_INT_COAL, tmp); - - tmp = 0x100; - mw32(MVS_INT_COAL_TMOUT, tmp); - - /* ladies and gentlemen, start your engines */ - mw32(MVS_TX_CFG, 0); - mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); - mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN); - /* enable CMD/CMPL_Q/RESP mode */ - mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | - PCS_CMD_EN | PCS_CMD_STOP_ERR); - - /* enable completion queue interrupt */ - tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP | - CINT_DMA_PCIE); - - mw32(MVS_INT_MASK, tmp); - - /* Enable SRS interrupt */ - mw32(MVS_INT_MASK_SRS_0, 0xFFFF); - - return 0; -} - -static int mvs_64xx_ioremap(struct mvs_info *mvi) -{ - if (!mvs_ioremap(mvi, 4, 2)) - return 0; - return -1; -} - -static void mvs_64xx_iounmap(struct mvs_info *mvi) -{ - mvs_iounmap(mvi->regs); - mvs_iounmap(mvi->regs_ex); -} - -static void mvs_64xx_interrupt_enable(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - - tmp = mr32(MVS_GBL_CTL); - mw32(MVS_GBL_CTL, tmp | INT_EN); -} - -static void mvs_64xx_interrupt_disable(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - - tmp = mr32(MVS_GBL_CTL); - mw32(MVS_GBL_CTL, tmp & ~INT_EN); -} - -static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq) -{ - void __iomem *regs = mvi->regs; - u32 stat; - - if (!(mvi->flags & MVF_FLAG_SOC)) { - stat = mr32(MVS_GBL_INT_STAT); - - if (stat == 0 || stat == 0xffffffff) - return 0; - } else - stat = 1; - return stat; -} - -static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat) -{ - void __iomem *regs = mvi->regs; - - /* clear CMD_CMPLT ASAP */ - mw32_f(MVS_INT_STAT, CINT_DONE); -#ifndef MVS_USE_TASKLET - spin_lock(&mvi->lock); -#endif - mvs_int_full(mvi); -#ifndef MVS_USE_TASKLET - spin_unlock(&mvi->lock); -#endif - return IRQ_HANDLED; -} - -static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx) -{ - u32 tmp; - mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32)); - mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32)); - do { - tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3)); - } while (tmp & 1 << (slot_idx % 32)); - do { - tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3)); - } while (tmp & 1 << (slot_idx % 32)); -} - -static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type, - u32 tfs) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - - if (type == PORT_TYPE_SATA) { - tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs); - mw32(MVS_INT_STAT_SRS_0, tmp); - } - mw32(MVS_INT_STAT, CINT_CI_STOP); - tmp = mr32(MVS_PCS) | 0xFF00; - mw32(MVS_PCS, tmp); -} - -static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs) -{ - void __iomem *regs = mvi->regs; - u32 tmp, offs; - - if (*tfs == MVS_ID_NOT_MAPPED) - return; - - offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT); - if (*tfs < 16) { - tmp = mr32(MVS_PCS); - mw32(MVS_PCS, tmp & ~offs); - } else { - tmp = mr32(MVS_CTL); - mw32(MVS_CTL, tmp & ~offs); - } - - tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs); - if (tmp) - mw32(MVS_INT_STAT_SRS_0, tmp); - - *tfs = MVS_ID_NOT_MAPPED; - return; -} - -static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs) -{ - int i; - u32 tmp, offs; - void __iomem *regs = mvi->regs; - - if (*tfs != MVS_ID_NOT_MAPPED) - return 0; - - tmp = mr32(MVS_PCS); - - for (i = 0; i < mvi->chip->srs_sz; i++) { - if (i == 16) - tmp = mr32(MVS_CTL); - offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT); - if (!(tmp & offs)) { - *tfs = i; - - if (i < 16) - mw32(MVS_PCS, tmp | offs); - else - mw32(MVS_CTL, tmp | offs); 
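The loop in mvs_64xx_assign_reg_set() above is a small allocator: each SATA device claims one of the chip's register sets, whose enable bits are packed 16 to a register (sets 0-15 in MVS_PCS, 16-31 in MVS_CTL, each at bit (i & 0x0f) + PCS_EN_SATA_REG_SHIFT). A condensed sketch of the same search over register snapshots, assuming only the driver's PCS_EN_SATA_REG_SHIFT constant; srs_first_free is a hypothetical helper, not part of this code:

	/* Hypothetical helper: find the first SATA register set whose
	 * enable bit is clear, given snapshots of PCS and CTL. */
	static int srs_first_free(u32 pcs, u32 ctl, int srs_sz)
	{
		int i;

		for (i = 0; i < srs_sz; i++) {
			u32 reg = (i < 16) ? pcs : ctl;
			u32 bit = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);

			if (!(reg & bit))
				return i;	/* caller sets 'bit' to claim it */
		}
		return -1;			/* all register sets in use */
	}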
- tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i); - if (tmp) - mw32(MVS_INT_STAT_SRS_0, tmp); - return 0; - } - } - return MVS_ID_NOT_MAPPED; -} - -void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd) -{ - int i; - struct scatterlist *sg; - struct mvs_prd *buf_prd = prd; - for_each_sg(scatter, sg, nr, i) { - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); - buf_prd->len = cpu_to_le32(sg_dma_len(sg)); - buf_prd++; - } -} - -static int mvs_64xx_oob_done(struct mvs_info *mvi, int i) -{ - u32 phy_st; - mvs_write_port_cfg_addr(mvi, i, - PHYR_PHY_STAT); - phy_st = mvs_read_port_cfg_data(mvi, i); - if (phy_st & PHY_OOB_DTCTD) - return 1; - return 0; -} - -static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i, - struct sas_identify_frame *id) - -{ - struct mvs_phy *phy = &mvi->phy[i]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - - sas_phy->linkrate = - (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; - - phy->minimum_linkrate = - (phy->phy_status & - PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8; - phy->maximum_linkrate = - (phy->phy_status & - PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12; - - mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY); - phy->dev_info = mvs_read_port_cfg_data(mvi, i); - - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO); - phy->att_dev_info = mvs_read_port_cfg_data(mvi, i); - - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI); - phy->att_dev_sas_addr = - (u64) mvs_read_port_cfg_data(mvi, i) << 32; - mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO); - phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i); - phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr); -} - -static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i) -{ - u32 tmp; - struct mvs_phy *phy = &mvi->phy[i]; - /* workaround for HW phy decoding error on 1.5g disk drive */ - mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6); - tmp = mvs_read_port_vsr_data(mvi, i); - if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) == - SAS_LINK_RATE_1_5_GBPS) - tmp &= ~PHY_MODE6_LATECLK; - else - tmp |= PHY_MODE6_LATECLK; - mvs_write_port_vsr_data(mvi, i, tmp); -} - -void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, - struct sas_phy_linkrates *rates) -{ - u32 lrmin = 0, lrmax = 0; - u32 tmp; - - tmp = mvs_read_phy_ctl(mvi, phy_id); - lrmin = (rates->minimum_linkrate << 8); - lrmax = (rates->maximum_linkrate << 12); - - if (lrmin) { - tmp &= ~(0xf << 8); - tmp |= lrmin; - } - if (lrmax) { - tmp &= ~(0xf << 12); - tmp |= lrmax; - } - mvs_write_phy_ctl(mvi, phy_id, tmp); - mvs_64xx_phy_reset(mvi, phy_id, 1); -} - -static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi) -{ - u32 tmp; - void __iomem *regs = mvi->regs; - tmp = mr32(MVS_PCS); - mw32(MVS_PCS, tmp & 0xFFFF); - mw32(MVS_PCS, tmp); - tmp = mr32(MVS_CTL); - mw32(MVS_CTL, tmp & 0xFFFF); - mw32(MVS_CTL, tmp); -} - - -u32 mvs_64xx_spi_read_data(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs_ex; - return ior32(SPI_DATA_REG_64XX); -} - -void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data) -{ - void __iomem *regs = mvi->regs_ex; - iow32(SPI_DATA_REG_64XX, data); -} - - -int mvs_64xx_spi_buildcmd(struct mvs_info *mvi, - u32 *dwCmd, - u8 cmd, - u8 read, - u8 length, - u32 addr - ) -{ - u32 dwTmp; - - dwTmp = ((u32)cmd << 24) | ((u32)length << 19); - if (read) - dwTmp |= 1U<<23; - - if (addr != MV_MAX_U32) { - dwTmp |= 1U<<22; - dwTmp |= (addr & 0x0003FFFF); - } - - *dwCmd = dwTmp; - return 0; -} - - -int 
mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd) -{ - void __iomem *regs = mvi->regs_ex; - int retry; - - for (retry = 0; retry < 1; retry++) { - iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE); - iow32(SPI_CMD_REG_64XX, cmd); - iow32(SPI_CTRL_REG_64XX, - SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART); - } - - return 0; -} - -int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout) -{ - void __iomem *regs = mvi->regs_ex; - u32 i, dwTmp; - - for (i = 0; i < timeout; i++) { - dwTmp = ior32(SPI_CTRL_REG_64XX); - if (!(dwTmp & SPI_CTRL_SPISTART)) - return 0; - msleep(10); - } - - return -1; -} - -#ifndef DISABLE_HOTPLUG_DMA_FIX -void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd) -{ - int i; - struct mvs_prd *buf_prd = prd; - buf_prd += from; - for (i = 0; i < MAX_SG_ENTRY - from; i++) { - buf_prd->addr = cpu_to_le64(buf_dma); - buf_prd->len = cpu_to_le32(buf_len); - ++buf_prd; - } -} -#endif - -const struct mvs_dispatch mvs_64xx_dispatch = { - "mv64xx", - mvs_64xx_init, - NULL, - mvs_64xx_ioremap, - mvs_64xx_iounmap, - mvs_64xx_isr, - mvs_64xx_isr_status, - mvs_64xx_interrupt_enable, - mvs_64xx_interrupt_disable, - mvs_read_phy_ctl, - mvs_write_phy_ctl, - mvs_read_port_cfg_data, - mvs_write_port_cfg_data, - mvs_write_port_cfg_addr, - mvs_read_port_vsr_data, - mvs_write_port_vsr_data, - mvs_write_port_vsr_addr, - mvs_read_port_irq_stat, - mvs_write_port_irq_stat, - mvs_read_port_irq_mask, - mvs_write_port_irq_mask, - mvs_get_sas_addr, - mvs_64xx_command_active, - mvs_64xx_issue_stop, - mvs_start_delivery, - mvs_rx_update, - mvs_int_full, - mvs_64xx_assign_reg_set, - mvs_64xx_free_reg_set, - mvs_get_prd_size, - mvs_get_prd_count, - mvs_64xx_make_prd, - mvs_64xx_detect_porttype, - mvs_64xx_oob_done, - mvs_64xx_fix_phy_info, - mvs_64xx_phy_work_around, - mvs_64xx_phy_set_link_rate, - mvs_hw_max_link_rate, - mvs_64xx_phy_disable, - mvs_64xx_phy_enable, - mvs_64xx_phy_reset, - mvs_64xx_stp_reset, - mvs_64xx_clear_active_cmds, - mvs_64xx_spi_read_data, - mvs_64xx_spi_write_data, - mvs_64xx_spi_buildcmd, - mvs_64xx_spi_issuecmd, - mvs_64xx_spi_waitdataready, -#ifndef DISABLE_HOTPLUG_DMA_FIX - mvs_64xx_fix_dma, -#endif -}; - diff --git a/trunk/drivers/scsi/mvsas/mv_64xx.h b/trunk/drivers/scsi/mvsas/mv_64xx.h deleted file mode 100644 index 42e947d9795e..000000000000 --- a/trunk/drivers/scsi/mvsas/mv_64xx.h +++ /dev/null @@ -1,151 +0,0 @@ -/* - * Marvell 88SE64xx hardware specific head file - * - * Copyright 2007 Red Hat, Inc. - * Copyright 2008 Marvell. - * - * This file is licensed under GPLv2. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; version 2 of the - * License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA -*/ - -#ifndef _MVS64XX_REG_H_ -#define _MVS64XX_REG_H_ - -#include - -#define MAX_LINK_RATE SAS_LINK_RATE_3_0_GBPS - -/* enhanced mode registers (BAR4) */ -enum hw_registers { - MVS_GBL_CTL = 0x04, /* global control */ - MVS_GBL_INT_STAT = 0x08, /* global irq status */ - MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ - - MVS_PHY_CTL = 0x40, /* SOC PHY Control */ - MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */ - - MVS_GBL_PORT_TYPE = 0xa0, /* port type */ - - MVS_CTL = 0x100, /* SAS/SATA port configuration */ - MVS_PCS = 0x104, /* SAS/SATA port control/status */ - MVS_CMD_LIST_LO = 0x108, /* cmd list addr */ - MVS_CMD_LIST_HI = 0x10C, - MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */ - MVS_RX_FIS_HI = 0x114, - - MVS_TX_CFG = 0x120, /* TX configuration */ - MVS_TX_LO = 0x124, /* TX (delivery) ring addr */ - MVS_TX_HI = 0x128, - - MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */ - MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */ - MVS_RX_CFG = 0x134, /* RX configuration */ - MVS_RX_LO = 0x138, /* RX (completion) ring addr */ - MVS_RX_HI = 0x13C, - MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */ - - MVS_INT_COAL = 0x148, /* Int coalescing config */ - MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ - MVS_INT_STAT = 0x150, /* Central int status */ - MVS_INT_MASK = 0x154, /* Central int enable */ - MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */ - MVS_INT_MASK_SRS_0 = 0x15C, - - /* ports 1-3 follow after this */ - MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */ - MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */ - /* ports 5-7 follow after this */ - MVS_P4_INT_STAT = 0x200, /* Port4 interrupt status */ - MVS_P4_INT_MASK = 0x204, /* Port4 interrupt enable mask */ - - /* ports 1-3 follow after this */ - MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */ - /* ports 5-7 follow after this */ - MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */ - - MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */ - MVS_CMD_DATA = 0x1BC, /* Command register port (data) */ - - /* ports 1-3 follow after this */ - MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */ - MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */ - /* ports 5-7 follow after this */ - MVS_P4_CFG_ADDR = 0x230, /* Port4 config address */ - MVS_P4_CFG_DATA = 0x234, /* Port4 config data */ - - /* ports 1-3 follow after this */ - MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */ - MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */ - /* ports 5-7 follow after this */ - MVS_P4_VSR_ADDR = 0x250, /* port4 VSR addr */ - MVS_P4_VSR_DATA = 0x254, /* port4 VSR data */ -}; - -enum pci_cfg_registers { - PCR_PHY_CTL = 0x40, - PCR_PHY_CTL2 = 0x90, - PCR_DEV_CTRL = 0xE8, - PCR_LINK_STAT = 0xF2, -}; - -/* SAS/SATA Vendor Specific Port Registers */ -enum sas_sata_vsp_regs { - VSR_PHY_STAT = 0x00, /* Phy Status */ - VSR_PHY_MODE1 = 0x01, /* phy tx */ - VSR_PHY_MODE2 = 0x02, /* tx scc */ - VSR_PHY_MODE3 = 0x03, /* pll */ - VSR_PHY_MODE4 = 0x04, /* VCO */ - VSR_PHY_MODE5 = 0x05, /* Rx */ - VSR_PHY_MODE6 = 0x06, /* CDR */ - VSR_PHY_MODE7 = 0x07, /* Impedance */ - VSR_PHY_MODE8 = 0x08, /* Voltage */ - VSR_PHY_MODE9 = 0x09, /* Test */ - VSR_PHY_MODE10 = 0x0A, /* Power */ - VSR_PHY_MODE11 = 0x0B, /* Phy Mode */ - VSR_PHY_VS0 = 0x0C, /* Vednor Specific 0 */ - VSR_PHY_VS1 = 0x0D, /* 
Vednor Specific 1 */ -}; - -enum chip_register_bits { - PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8), - PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12), - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16), - PHY_NEG_SPP_PHYS_LINK_RATE_MASK = - (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), -}; - -#define MAX_SG_ENTRY 64 - -struct mvs_prd { - __le64 addr; /* 64-bit buffer address */ - __le32 reserved; - __le32 len; /* 16-bit length */ -}; - -#define SPI_CTRL_REG 0xc0 -#define SPI_CTRL_VENDOR_ENABLE (1U<<29) -#define SPI_CTRL_SPIRDY (1U<<22) -#define SPI_CTRL_SPISTART (1U<<20) - -#define SPI_CMD_REG 0xc4 -#define SPI_DATA_REG 0xc8 - -#define SPI_CTRL_REG_64XX 0x10 -#define SPI_CMD_REG_64XX 0x14 -#define SPI_DATA_REG_64XX 0x18 - -#endif diff --git a/trunk/drivers/scsi/mvsas/mv_94xx.c b/trunk/drivers/scsi/mvsas/mv_94xx.c deleted file mode 100644 index 0940fae19d20..000000000000 --- a/trunk/drivers/scsi/mvsas/mv_94xx.c +++ /dev/null @@ -1,672 +0,0 @@ -/* - * Marvell 88SE94xx hardware specific - * - * Copyright 2007 Red Hat, Inc. - * Copyright 2008 Marvell. - * - * This file is licensed under GPLv2. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; version 2 of the - * License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA -*/ - -#include "mv_sas.h" -#include "mv_94xx.h" -#include "mv_chips.h" - -static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i) -{ - u32 reg; - struct mvs_phy *phy = &mvi->phy[i]; - u32 phy_status; - - mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3); - reg = mvs_read_port_vsr_data(mvi, i); - phy_status = ((reg & 0x3f0000) >> 16) & 0xff; - phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); - switch (phy_status) { - case 0x10: - phy->phy_type |= PORT_TYPE_SAS; - break; - case 0x1d: - default: - phy->phy_type |= PORT_TYPE_SATA; - break; - } -} - -static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - - tmp = mr32(MVS_PCS); - tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2); - mw32(MVS_PCS, tmp); -} - -static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard) -{ - u32 tmp; - - tmp = mvs_read_port_irq_stat(mvi, phy_id); - tmp &= ~PHYEV_RDY_CH; - mvs_write_port_irq_stat(mvi, phy_id, tmp); - if (hard) { - tmp = mvs_read_phy_ctl(mvi, phy_id); - tmp |= PHY_RST_HARD; - mvs_write_phy_ctl(mvi, phy_id, tmp); - do { - tmp = mvs_read_phy_ctl(mvi, phy_id); - } while (tmp & PHY_RST_HARD); - } else { - mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT); - tmp = mvs_read_port_vsr_data(mvi, phy_id); - tmp |= PHY_RST; - mvs_write_port_vsr_data(mvi, phy_id, tmp); - } -} - -static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id) -{ - u32 tmp; - mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); - tmp = mvs_read_port_vsr_data(mvi, phy_id); - mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000); -} - -static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id) -{ - mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4); - mvs_write_port_vsr_data(mvi, 
phy_id, 0x8300ffc1); - mvs_write_port_vsr_addr(mvi, phy_id, 0x104); - mvs_write_port_vsr_data(mvi, phy_id, 0x00018080); - mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); - mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff); -} - -static int __devinit mvs_94xx_init(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - int i; - u32 tmp, cctl; - - mvs_show_pcie_usage(mvi); - if (mvi->flags & MVF_FLAG_SOC) { - tmp = mr32(MVS_PHY_CTL); - tmp &= ~PCTL_PWR_OFF; - tmp |= PCTL_PHY_DSBL; - mw32(MVS_PHY_CTL, tmp); - } - - /* Init Chip */ - /* make sure RST is set; HBA_RST /should/ have done that for us */ - cctl = mr32(MVS_CTL) & 0xFFFF; - if (cctl & CCTL_RST) - cctl &= ~CCTL_RST; - else - mw32_f(MVS_CTL, cctl | CCTL_RST); - - if (mvi->flags & MVF_FLAG_SOC) { - tmp = mr32(MVS_PHY_CTL); - tmp &= ~PCTL_PWR_OFF; - tmp |= PCTL_COM_ON; - tmp &= ~PCTL_PHY_DSBL; - tmp |= PCTL_LINK_RST; - mw32(MVS_PHY_CTL, tmp); - msleep(100); - tmp &= ~PCTL_LINK_RST; - mw32(MVS_PHY_CTL, tmp); - msleep(100); - } - - /* reset control */ - mw32(MVS_PCS, 0); /* MVS_PCS */ - mw32(MVS_STP_REG_SET_0, 0); - mw32(MVS_STP_REG_SET_1, 0); - - /* init phys */ - mvs_phy_hacks(mvi); - - /* disable Multiplexing, enable phy implemented */ - mw32(MVS_PORTS_IMP, 0xFF); - - - mw32(MVS_PA_VSR_ADDR, 0x00000104); - mw32(MVS_PA_VSR_PORT, 0x00018080); - mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8); - mw32(MVS_PA_VSR_PORT, 0x0084ffff); - - /* set LED blink when IO*/ - mw32(MVS_PA_VSR_ADDR, 0x00000030); - tmp = mr32(MVS_PA_VSR_PORT); - tmp &= 0xFFFF00FF; - tmp |= 0x00003300; - mw32(MVS_PA_VSR_PORT, tmp); - - mw32(MVS_CMD_LIST_LO, mvi->slot_dma); - mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16); - - mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma); - mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16); - - mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ); - mw32(MVS_TX_LO, mvi->tx_dma); - mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16); - - mw32(MVS_RX_CFG, MVS_RX_RING_SZ); - mw32(MVS_RX_LO, mvi->rx_dma); - mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16); - - for (i = 0; i < mvi->chip->n_phy; i++) { - mvs_94xx_phy_disable(mvi, i); - /* set phy local SAS address */ - mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4, - (mvi->phy[i].dev_sas_addr)); - - mvs_94xx_enable_xmt(mvi, i); - mvs_94xx_phy_enable(mvi, i); - - mvs_94xx_phy_reset(mvi, i, 1); - msleep(500); - mvs_94xx_detect_porttype(mvi, i); - } - - if (mvi->flags & MVF_FLAG_SOC) { - /* set select registers */ - writel(0x0E008000, regs + 0x000); - writel(0x59000008, regs + 0x004); - writel(0x20, regs + 0x008); - writel(0x20, regs + 0x00c); - writel(0x20, regs + 0x010); - writel(0x20, regs + 0x014); - writel(0x20, regs + 0x018); - writel(0x20, regs + 0x01c); - } - for (i = 0; i < mvi->chip->n_phy; i++) { - /* clear phy int status */ - tmp = mvs_read_port_irq_stat(mvi, i); - tmp &= ~PHYEV_SIG_FIS; - mvs_write_port_irq_stat(mvi, i, tmp); - - /* set phy int mask */ - tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | - PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ; - mvs_write_port_irq_mask(mvi, i, tmp); - - msleep(100); - mvs_update_phyinfo(mvi, i, 1); - } - - /* FIXME: update wide port bitmaps */ - - /* little endian for open address and command table, etc. 
*/ - /* - * it seems that ( from the spec ) turning on big-endian won't - * do us any good on big-endian machines, need further confirmation - */ - cctl = mr32(MVS_CTL); - cctl |= CCTL_ENDIAN_CMD; - cctl |= CCTL_ENDIAN_DATA; - cctl &= ~CCTL_ENDIAN_OPEN; - cctl |= CCTL_ENDIAN_RSP; - mw32_f(MVS_CTL, cctl); - - /* reset CMD queue */ - tmp = mr32(MVS_PCS); - tmp |= PCS_CMD_RST; - mw32(MVS_PCS, tmp); - /* interrupt coalescing may cause missing HW interrput in some case, - * and the max count is 0x1ff, while our max slot is 0x200, - * it will make count 0. - */ - tmp = 0; - mw32(MVS_INT_COAL, tmp); - - tmp = 0x100; - mw32(MVS_INT_COAL_TMOUT, tmp); - - /* ladies and gentlemen, start your engines */ - mw32(MVS_TX_CFG, 0); - mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN); - mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN); - /* enable CMD/CMPL_Q/RESP mode */ - mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN | - PCS_CMD_EN | PCS_CMD_STOP_ERR); - - /* enable completion queue interrupt */ - tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP | - CINT_DMA_PCIE); - tmp |= CINT_PHY_MASK; - mw32(MVS_INT_MASK, tmp); - - /* Enable SRS interrupt */ - mw32(MVS_INT_MASK_SRS_0, 0xFFFF); - - return 0; -} - -static int mvs_94xx_ioremap(struct mvs_info *mvi) -{ - if (!mvs_ioremap(mvi, 2, -1)) { - mvi->regs_ex = mvi->regs + 0x10200; - mvi->regs += 0x20000; - if (mvi->id == 1) - mvi->regs += 0x4000; - return 0; - } - return -1; -} - -static void mvs_94xx_iounmap(struct mvs_info *mvi) -{ - if (mvi->regs) { - mvi->regs -= 0x20000; - if (mvi->id == 1) - mvi->regs -= 0x4000; - mvs_iounmap(mvi->regs); - } -} - -static void mvs_94xx_interrupt_enable(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs_ex; - u32 tmp; - - tmp = mr32(MVS_GBL_CTL); - tmp |= (IRQ_SAS_A | IRQ_SAS_B); - mw32(MVS_GBL_INT_STAT, tmp); - writel(tmp, regs + 0x0C); - writel(tmp, regs + 0x10); - writel(tmp, regs + 0x14); - writel(tmp, regs + 0x18); - mw32(MVS_GBL_CTL, tmp); -} - -static void mvs_94xx_interrupt_disable(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs_ex; - u32 tmp; - - tmp = mr32(MVS_GBL_CTL); - - tmp &= ~(IRQ_SAS_A | IRQ_SAS_B); - mw32(MVS_GBL_INT_STAT, tmp); - writel(tmp, regs + 0x0C); - writel(tmp, regs + 0x10); - writel(tmp, regs + 0x14); - writel(tmp, regs + 0x18); - mw32(MVS_GBL_CTL, tmp); -} - -static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq) -{ - void __iomem *regs = mvi->regs_ex; - u32 stat = 0; - if (!(mvi->flags & MVF_FLAG_SOC)) { - stat = mr32(MVS_GBL_INT_STAT); - - if (!(stat & (IRQ_SAS_A | IRQ_SAS_B))) - return 0; - } - return stat; -} - -static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat) -{ - void __iomem *regs = mvi->regs; - - if (((stat & IRQ_SAS_A) && mvi->id == 0) || - ((stat & IRQ_SAS_B) && mvi->id == 1)) { - mw32_f(MVS_INT_STAT, CINT_DONE); - #ifndef MVS_USE_TASKLET - spin_lock(&mvi->lock); - #endif - mvs_int_full(mvi); - #ifndef MVS_USE_TASKLET - spin_unlock(&mvi->lock); - #endif - } - return IRQ_HANDLED; -} - -static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx) -{ - u32 tmp; - mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32)); - do { - tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3)); - } while (tmp & 1 << (slot_idx % 32)); -} - -static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type, - u32 tfs) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - - if (type == PORT_TYPE_SATA) { - tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs); - mw32(MVS_INT_STAT_SRS_0, tmp); - } - mw32(MVS_INT_STAT, CINT_CI_STOP); - tmp = 
mr32(MVS_PCS) | 0xFF00; - mw32(MVS_PCS, tmp); -} - -static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs) -{ - void __iomem *regs = mvi->regs; - u32 tmp; - u8 reg_set = *tfs; - - if (*tfs == MVS_ID_NOT_MAPPED) - return; - - mvi->sata_reg_set &= ~bit(reg_set); - if (reg_set < 32) { - w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set); - tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set; - if (tmp) - mw32(MVS_INT_STAT_SRS_0, tmp); - } else { - w_reg_set_enable(reg_set, mvi->sata_reg_set); - tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set; - if (tmp) - mw32(MVS_INT_STAT_SRS_1, tmp); - } - - *tfs = MVS_ID_NOT_MAPPED; - - return; -} - -static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs) -{ - int i; - void __iomem *regs = mvi->regs; - - if (*tfs != MVS_ID_NOT_MAPPED) - return 0; - - i = mv_ffc64(mvi->sata_reg_set); - if (i > 32) { - mvi->sata_reg_set |= bit(i); - w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32)); - *tfs = i; - return 0; - } else if (i >= 0) { - mvi->sata_reg_set |= bit(i); - w_reg_set_enable(i, (u32)mvi->sata_reg_set); - *tfs = i; - return 0; - } - return MVS_ID_NOT_MAPPED; -} - -static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd) -{ - int i; - struct scatterlist *sg; - struct mvs_prd *buf_prd = prd; - for_each_sg(scatter, sg, nr, i) { - buf_prd->addr = cpu_to_le64(sg_dma_address(sg)); - buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg)); - buf_prd++; - } -} - -static int mvs_94xx_oob_done(struct mvs_info *mvi, int i) -{ - u32 phy_st; - phy_st = mvs_read_phy_ctl(mvi, i); - if (phy_st & PHY_READY_MASK) /* phy ready */ - return 1; - return 0; -} - -static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id, - struct sas_identify_frame *id) -{ - int i; - u32 id_frame[7]; - - for (i = 0; i < 7; i++) { - mvs_write_port_cfg_addr(mvi, port_id, - CONFIG_ID_FRAME0 + i * 4); - id_frame[i] = mvs_read_port_cfg_data(mvi, port_id); - } - memcpy(id, id_frame, 28); -} - -static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id, - struct sas_identify_frame *id) -{ - int i; - u32 id_frame[7]; - - /* mvs_hexdump(28, (u8 *)id_frame, 0); */ - for (i = 0; i < 7; i++) { - mvs_write_port_cfg_addr(mvi, port_id, - CONFIG_ATT_ID_FRAME0 + i * 4); - id_frame[i] = mvs_read_port_cfg_data(mvi, port_id); - mv_dprintk("94xx phy %d atta frame %d %x.\n", - port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]); - } - /* mvs_hexdump(28, (u8 *)id_frame, 0); */ - memcpy(id, id_frame, 28); -} - -static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id) -{ - u32 att_dev_info = 0; - - att_dev_info |= id->dev_type; - if (id->stp_iport) - att_dev_info |= PORT_DEV_STP_INIT; - if (id->smp_iport) - att_dev_info |= PORT_DEV_SMP_INIT; - if (id->ssp_iport) - att_dev_info |= PORT_DEV_SSP_INIT; - if (id->stp_tport) - att_dev_info |= PORT_DEV_STP_TRGT; - if (id->smp_tport) - att_dev_info |= PORT_DEV_SMP_TRGT; - if (id->ssp_tport) - att_dev_info |= PORT_DEV_SSP_TRGT; - - att_dev_info |= (u32)id->phy_id<<24; - return att_dev_info; -} - -static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id) -{ - return mvs_94xx_make_dev_info(id); -} - -static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i, - struct sas_identify_frame *id) -{ - struct mvs_phy *phy = &mvi->phy[i]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status); - sas_phy->linkrate = - (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >> - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET; - sas_phy->linkrate 
+= 0x8; - mv_dprintk("get link rate is %d\n", sas_phy->linkrate); - phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; - phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS; - mvs_94xx_get_dev_identify_frame(mvi, i, id); - phy->dev_info = mvs_94xx_make_dev_info(id); - - if (phy->phy_type & PORT_TYPE_SAS) { - mvs_94xx_get_att_identify_frame(mvi, i, id); - phy->att_dev_info = mvs_94xx_make_att_info(id); - phy->att_dev_sas_addr = *(u64 *)id->sas_addr; - } else { - phy->att_dev_info = PORT_DEV_STP_TRGT | 1; - } - -} - -void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, - struct sas_phy_linkrates *rates) -{ - /* TODO */ -} - -static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi) -{ - u32 tmp; - void __iomem *regs = mvi->regs; - tmp = mr32(MVS_STP_REG_SET_0); - mw32(MVS_STP_REG_SET_0, 0); - mw32(MVS_STP_REG_SET_0, tmp); - tmp = mr32(MVS_STP_REG_SET_1); - mw32(MVS_STP_REG_SET_1, 0); - mw32(MVS_STP_REG_SET_1, tmp); -} - - -u32 mvs_94xx_spi_read_data(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs_ex - 0x10200; - return mr32(SPI_RD_DATA_REG_94XX); -} - -void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data) -{ - void __iomem *regs = mvi->regs_ex - 0x10200; - mw32(SPI_RD_DATA_REG_94XX, data); -} - - -int mvs_94xx_spi_buildcmd(struct mvs_info *mvi, - u32 *dwCmd, - u8 cmd, - u8 read, - u8 length, - u32 addr - ) -{ - void __iomem *regs = mvi->regs_ex - 0x10200; - u32 dwTmp; - - dwTmp = ((u32)cmd << 8) | ((u32)length << 4); - if (read) - dwTmp |= SPI_CTRL_READ_94XX; - - if (addr != MV_MAX_U32) { - mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL)); - dwTmp |= SPI_ADDR_VLD_94XX; - } - - *dwCmd = dwTmp; - return 0; -} - - -int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd) -{ - void __iomem *regs = mvi->regs_ex - 0x10200; - mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX); - - return 0; -} - -int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout) -{ - void __iomem *regs = mvi->regs_ex - 0x10200; - u32 i, dwTmp; - - for (i = 0; i < timeout; i++) { - dwTmp = mr32(SPI_CTRL_REG_94XX); - if (!(dwTmp & SPI_CTRL_SpiStart_94XX)) - return 0; - msleep(10); - } - - return -1; -} - -#ifndef DISABLE_HOTPLUG_DMA_FIX -void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd) -{ - int i; - struct mvs_prd *buf_prd = prd; - buf_prd += from; - for (i = 0; i < MAX_SG_ENTRY - from; i++) { - buf_prd->addr = cpu_to_le64(buf_dma); - buf_prd->im_len.len = cpu_to_le32(buf_len); - ++buf_prd; - } -} -#endif - -const struct mvs_dispatch mvs_94xx_dispatch = { - "mv94xx", - mvs_94xx_init, - NULL, - mvs_94xx_ioremap, - mvs_94xx_iounmap, - mvs_94xx_isr, - mvs_94xx_isr_status, - mvs_94xx_interrupt_enable, - mvs_94xx_interrupt_disable, - mvs_read_phy_ctl, - mvs_write_phy_ctl, - mvs_read_port_cfg_data, - mvs_write_port_cfg_data, - mvs_write_port_cfg_addr, - mvs_read_port_vsr_data, - mvs_write_port_vsr_data, - mvs_write_port_vsr_addr, - mvs_read_port_irq_stat, - mvs_write_port_irq_stat, - mvs_read_port_irq_mask, - mvs_write_port_irq_mask, - mvs_get_sas_addr, - mvs_94xx_command_active, - mvs_94xx_issue_stop, - mvs_start_delivery, - mvs_rx_update, - mvs_int_full, - mvs_94xx_assign_reg_set, - mvs_94xx_free_reg_set, - mvs_get_prd_size, - mvs_get_prd_count, - mvs_94xx_make_prd, - mvs_94xx_detect_porttype, - mvs_94xx_oob_done, - mvs_94xx_fix_phy_info, - NULL, - mvs_94xx_phy_set_link_rate, - mvs_hw_max_link_rate, - mvs_94xx_phy_disable, - mvs_94xx_phy_enable, - mvs_94xx_phy_reset, - NULL, - mvs_94xx_clear_active_cmds, - mvs_94xx_spi_read_data, - mvs_94xx_spi_write_data, - 
mvs_94xx_spi_buildcmd, - mvs_94xx_spi_issuecmd, - mvs_94xx_spi_waitdataready, -#ifndef DISABLE_HOTPLUG_DMA_FIX - mvs_94xx_fix_dma, -#endif -}; - diff --git a/trunk/drivers/scsi/mvsas/mv_94xx.h b/trunk/drivers/scsi/mvsas/mv_94xx.h deleted file mode 100644 index 23ed9b164669..000000000000 --- a/trunk/drivers/scsi/mvsas/mv_94xx.h +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Marvell 88SE94xx hardware specific head file - * - * Copyright 2007 Red Hat, Inc. - * Copyright 2008 Marvell. - * - * This file is licensed under GPLv2. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; version 2 of the - * License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA -*/ - -#ifndef _MVS94XX_REG_H_ -#define _MVS94XX_REG_H_ - -#include - -#define MAX_LINK_RATE SAS_LINK_RATE_6_0_GBPS - -enum hw_registers { - MVS_GBL_CTL = 0x04, /* global control */ - MVS_GBL_INT_STAT = 0x00, /* global irq status */ - MVS_GBL_PI = 0x0C, /* ports implemented bitmask */ - - MVS_PHY_CTL = 0x40, /* SOC PHY Control */ - MVS_PORTS_IMP = 0x9C, /* SOC Port Implemented */ - - MVS_GBL_PORT_TYPE = 0xa0, /* port type */ - - MVS_CTL = 0x100, /* SAS/SATA port configuration */ - MVS_PCS = 0x104, /* SAS/SATA port control/status */ - MVS_CMD_LIST_LO = 0x108, /* cmd list addr */ - MVS_CMD_LIST_HI = 0x10C, - MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */ - MVS_RX_FIS_HI = 0x114, - MVS_STP_REG_SET_0 = 0x118, /* STP/SATA Register Set Enable */ - MVS_STP_REG_SET_1 = 0x11C, - MVS_TX_CFG = 0x120, /* TX configuration */ - MVS_TX_LO = 0x124, /* TX (delivery) ring addr */ - MVS_TX_HI = 0x128, - - MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */ - MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */ - MVS_RX_CFG = 0x134, /* RX configuration */ - MVS_RX_LO = 0x138, /* RX (completion) ring addr */ - MVS_RX_HI = 0x13C, - MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */ - - MVS_INT_COAL = 0x148, /* Int coalescing config */ - MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */ - MVS_INT_STAT = 0x150, /* Central int status */ - MVS_INT_MASK = 0x154, /* Central int enable */ - MVS_INT_STAT_SRS_0 = 0x158, /* SATA register set status */ - MVS_INT_MASK_SRS_0 = 0x15C, - MVS_INT_STAT_SRS_1 = 0x160, - MVS_INT_MASK_SRS_1 = 0x164, - MVS_NON_NCQ_ERR_0 = 0x168, /* SRS Non-specific NCQ Error */ - MVS_NON_NCQ_ERR_1 = 0x16C, - MVS_CMD_ADDR = 0x170, /* Command register port (addr) */ - MVS_CMD_DATA = 0x174, /* Command register port (data) */ - MVS_MEM_PARITY_ERR = 0x178, /* Memory parity error */ - - /* ports 1-3 follow after this */ - MVS_P0_INT_STAT = 0x180, /* port0 interrupt status */ - MVS_P0_INT_MASK = 0x184, /* port0 interrupt mask */ - /* ports 5-7 follow after this */ - MVS_P4_INT_STAT = 0x1A0, /* Port4 interrupt status */ - MVS_P4_INT_MASK = 0x1A4, /* Port4 interrupt enable mask */ - - /* ports 1-3 follow after this */ - MVS_P0_SER_CTLSTAT = 0x1D0, /* port0 serial control/status */ - /* ports 5-7 follow after this */ - MVS_P4_SER_CTLSTAT = 0x1E0, /* port4 serial control/status */ - - /* ports 1-3 follow after 
this */ - MVS_P0_CFG_ADDR = 0x200, /* port0 phy register address */ - MVS_P0_CFG_DATA = 0x204, /* port0 phy register data */ - /* ports 5-7 follow after this */ - MVS_P4_CFG_ADDR = 0x220, /* Port4 config address */ - MVS_P4_CFG_DATA = 0x224, /* Port4 config data */ - - /* phys 1-3 follow after this */ - MVS_P0_VSR_ADDR = 0x250, /* phy0 VSR address */ - MVS_P0_VSR_DATA = 0x254, /* phy0 VSR data */ - /* phys 5-7 follow after this */ - /* multiplexing */ - MVS_P4_VSR_ADDR = 0x250, /* phy4 VSR address */ - MVS_P4_VSR_DATA = 0x254, /* phy4 VSR data */ - MVS_PA_VSR_ADDR = 0x290, /* All port VSR addr */ - MVS_PA_VSR_PORT = 0x294, /* All port VSR data */ -}; - -enum pci_cfg_registers { - PCR_PHY_CTL = 0x40, - PCR_PHY_CTL2 = 0x90, - PCR_DEV_CTRL = 0x78, - PCR_LINK_STAT = 0x82, -}; - -/* SAS/SATA Vendor Specific Port Registers */ -enum sas_sata_vsp_regs { - VSR_PHY_STAT = 0x00 * 4, /* Phy Status */ - VSR_PHY_MODE1 = 0x01 * 4, /* phy tx */ - VSR_PHY_MODE2 = 0x02 * 4, /* tx scc */ - VSR_PHY_MODE3 = 0x03 * 4, /* pll */ - VSR_PHY_MODE4 = 0x04 * 4, /* VCO */ - VSR_PHY_MODE5 = 0x05 * 4, /* Rx */ - VSR_PHY_MODE6 = 0x06 * 4, /* CDR */ - VSR_PHY_MODE7 = 0x07 * 4, /* Impedance */ - VSR_PHY_MODE8 = 0x08 * 4, /* Voltage */ - VSR_PHY_MODE9 = 0x09 * 4, /* Test */ - VSR_PHY_MODE10 = 0x0A * 4, /* Power */ - VSR_PHY_MODE11 = 0x0B * 4, /* Phy Mode */ - VSR_PHY_VS0 = 0x0C * 4, /* Vendor Specific 0 */ - VSR_PHY_VS1 = 0x0D * 4, /* Vendor Specific 1 */ -}; - -enum chip_register_bits { - PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8), - PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8), - PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12), - PHY_NEG_SPP_PHYS_LINK_RATE_MASK = - (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET), -}; - -enum pci_interrupt_cause { - /* MAIN_IRQ_CAUSE (R10200) Bits */ - IRQ_COM_IN_I2O_IOP0 = (1 << 0), - IRQ_COM_IN_I2O_IOP1 = (1 << 1), - IRQ_COM_IN_I2O_IOP2 = (1 << 2), - IRQ_COM_IN_I2O_IOP3 = (1 << 3), - IRQ_COM_OUT_I2O_HOS0 = (1 << 4), - IRQ_COM_OUT_I2O_HOS1 = (1 << 5), - IRQ_COM_OUT_I2O_HOS2 = (1 << 6), - IRQ_COM_OUT_I2O_HOS3 = (1 << 7), - IRQ_PCIF_TO_CPU_DRBL0 = (1 << 8), - IRQ_PCIF_TO_CPU_DRBL1 = (1 << 9), - IRQ_PCIF_TO_CPU_DRBL2 = (1 << 10), - IRQ_PCIF_TO_CPU_DRBL3 = (1 << 11), - IRQ_PCIF_DRBL0 = (1 << 12), - IRQ_PCIF_DRBL1 = (1 << 13), - IRQ_PCIF_DRBL2 = (1 << 14), - IRQ_PCIF_DRBL3 = (1 << 15), - IRQ_XOR_A = (1 << 16), - IRQ_XOR_B = (1 << 17), - IRQ_SAS_A = (1 << 18), - IRQ_SAS_B = (1 << 19), - IRQ_CPU_CNTRL = (1 << 20), - IRQ_GPIO = (1 << 21), - IRQ_UART = (1 << 22), - IRQ_SPI = (1 << 23), - IRQ_I2C = (1 << 24), - IRQ_SGPIO = (1 << 25), - IRQ_COM_ERR = (1 << 29), - IRQ_I2O_ERR = (1 << 30), - IRQ_PCIE_ERR = (1 << 31), -}; - -#define MAX_SG_ENTRY 255 - -struct mvs_prd_imt { - __le32 len:22; - u8 _r_a:2; - u8 misc_ctl:4; - u8 inter_sel:4; -}; - -struct mvs_prd { - /* 64-bit buffer address */ - __le64 addr; - /* 22-bit length */ - struct mvs_prd_imt im_len; -} __attribute__ ((packed)); - -#define SPI_CTRL_REG_94XX 0xc800 -#define SPI_ADDR_REG_94XX 0xc804 -#define SPI_WR_DATA_REG_94XX 0xc808 -#define SPI_RD_DATA_REG_94XX 0xc80c -#define SPI_CTRL_READ_94XX (1U << 2) -#define SPI_ADDR_VLD_94XX (1U << 1) -#define SPI_CTRL_SpiStart_94XX (1U << 0) - -#define mv_ffc(x) ffz(x) - -static inline int -mv_ffc64(u64 v) -{ - int i; - i = mv_ffc((u32)v); - if (i >= 0 && i < 32) - return i; - i = mv_ffc((u32)(v>>32)); - - if (i >= 0 && i < 32) - return 32 + i; - - return -1; -} - -#define r_reg_set_enable(i) \ - (((i) > 31) ? mr32(MVS_STP_REG_SET_1) : \ - mr32(MVS_STP_REG_SET_0)) - -#define w_reg_set_enable(i, tmp) \ - (((i) > 31) ? 
mw32(MVS_STP_REG_SET_1, tmp) : \ - mw32(MVS_STP_REG_SET_0, tmp)) - -extern const struct mvs_dispatch mvs_94xx_dispatch; -#endif - diff --git a/trunk/drivers/scsi/mvsas/mv_chips.h b/trunk/drivers/scsi/mvsas/mv_chips.h deleted file mode 100644 index a67e1c4172f9..000000000000 --- a/trunk/drivers/scsi/mvsas/mv_chips.h +++ /dev/null @@ -1,280 +0,0 @@ -/* - * Marvell 88SE64xx/88SE94xx register IO interface - * - * Copyright 2007 Red Hat, Inc. - * Copyright 2008 Marvell. - * - * This file is licensed under GPLv2. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; version 2 of the - * License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA -*/ - - -#ifndef _MV_CHIPS_H_ -#define _MV_CHIPS_H_ - -#define mr32(reg) readl(regs + reg) -#define mw32(reg, val) writel((val), regs + reg) -#define mw32_f(reg, val) do { \ - mw32(reg, val); \ - mr32(reg); \ - } while (0) - -#define iow32(reg, val) outl(val, (unsigned long)(regs + reg)) -#define ior32(reg) inl((unsigned long)(regs + reg)) -#define iow16(reg, val) outw(val, (unsigned long)(regs + reg)) -#define ior16(reg) inw((unsigned long)(regs + reg)) -#define iow8(reg, val) outb(val, (unsigned long)(regs + reg)) -#define ior8(reg) inb((unsigned long)(regs + reg)) - -static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr) -{ - void __iomem *regs = mvi->regs; - mw32(MVS_CMD_ADDR, addr); - return mr32(MVS_CMD_DATA); -} - -static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val) -{ - void __iomem *regs = mvi->regs; - mw32(MVS_CMD_ADDR, addr); - mw32(MVS_CMD_DATA, val); -} - -static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port) -{ - void __iomem *regs = mvi->regs; - return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) : - mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4); -} - -static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val) -{ - void __iomem *regs = mvi->regs; - if (port < 4) - mw32(MVS_P0_SER_CTLSTAT + port * 4, val); - else - mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val); -} - -static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off, - u32 off2, u32 port) -{ - void __iomem *regs = mvi->regs + off; - void __iomem *regs2 = mvi->regs + off2; - return (port < 4) ? 
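
`mvs_cr32()`/`mvs_cw32()` above implement a classic address/data window: a write to `MVS_CMD_ADDR` selects an internal register, after which `MVS_CMD_DATA` aliases it. A minimal sketch of the pattern over a simulated register file (the accessors and the backing array are hypothetical stand-ins for `readl()`/`writel()` on a mapped BAR):

```c
#include <stdint.h>

#define CMD_ADDR	0x170	/* window: address select register */
#define CMD_DATA	0x174	/* window: data register */

static uint32_t internal[256];	/* simulated internal register file */
static uint32_t selected;	/* which internal reg the window points at */

/* stand-ins for readl()/writel() on the BAR */
static uint32_t rd32(uint32_t off)
{
	return (off == CMD_DATA) ? internal[selected % 256] : 0;
}

static void wr32(uint32_t off, uint32_t val)
{
	if (off == CMD_ADDR)
		selected = val;
	else if (off == CMD_DATA)
		internal[selected % 256] = val;
}

/* the same shape as mvs_cr32()/mvs_cw32() */
static uint32_t indirect_read(uint32_t addr)
{
	wr32(CMD_ADDR, addr);
	return rd32(CMD_DATA);
}

static void indirect_write(uint32_t addr, uint32_t val)
{
	wr32(CMD_ADDR, addr);
	wr32(CMD_DATA, val);
}
```
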
readl(regs + port * 8) : - readl(regs2 + (port - 4) * 8); -} - -static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2, - u32 port, u32 val) -{ - void __iomem *regs = mvi->regs + off; - void __iomem *regs2 = mvi->regs + off2; - if (port < 4) - writel(val, regs + port * 8); - else - writel(val, regs2 + (port - 4) * 8); -} - -static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port) -{ - return mvs_read_port(mvi, MVS_P0_CFG_DATA, - MVS_P4_CFG_DATA, port); -} - -static inline void mvs_write_port_cfg_data(struct mvs_info *mvi, - u32 port, u32 val) -{ - mvs_write_port(mvi, MVS_P0_CFG_DATA, - MVS_P4_CFG_DATA, port, val); -} - -static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi, - u32 port, u32 addr) -{ - mvs_write_port(mvi, MVS_P0_CFG_ADDR, - MVS_P4_CFG_ADDR, port, addr); - mdelay(10); -} - -static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port) -{ - return mvs_read_port(mvi, MVS_P0_VSR_DATA, - MVS_P4_VSR_DATA, port); -} - -static inline void mvs_write_port_vsr_data(struct mvs_info *mvi, - u32 port, u32 val) -{ - mvs_write_port(mvi, MVS_P0_VSR_DATA, - MVS_P4_VSR_DATA, port, val); -} - -static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi, - u32 port, u32 addr) -{ - mvs_write_port(mvi, MVS_P0_VSR_ADDR, - MVS_P4_VSR_ADDR, port, addr); - mdelay(10); -} - -static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port) -{ - return mvs_read_port(mvi, MVS_P0_INT_STAT, - MVS_P4_INT_STAT, port); -} - -static inline void mvs_write_port_irq_stat(struct mvs_info *mvi, - u32 port, u32 val) -{ - mvs_write_port(mvi, MVS_P0_INT_STAT, - MVS_P4_INT_STAT, port, val); -} - -static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port) -{ - return mvs_read_port(mvi, MVS_P0_INT_MASK, - MVS_P4_INT_MASK, port); - -} - -static inline void mvs_write_port_irq_mask(struct mvs_info *mvi, - u32 port, u32 val) -{ - mvs_write_port(mvi, MVS_P0_INT_MASK, - MVS_P4_INT_MASK, port, val); -} - -static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi) -{ - u32 tmp; - - /* workaround for SATA R-ERR, to ignore phy glitch */ - tmp = mvs_cr32(mvi, CMD_PHY_TIMER); - tmp &= ~(1 << 9); - tmp |= (1 << 10); - mvs_cw32(mvi, CMD_PHY_TIMER, tmp); - - /* enable retry 127 times */ - mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f); - - /* extend open frame timeout to max */ - tmp = mvs_cr32(mvi, CMD_SAS_CTL0); - tmp &= ~0xffff; - tmp |= 0x3fff; - mvs_cw32(mvi, CMD_SAS_CTL0, tmp); - - /* workaround for WDTIMEOUT , set to 550 ms */ - mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000); - - /* not to halt for different port op during wideport link change */ - mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d); - - /* workaround for Seagate disk not-found OOB sequence, recv - * COMINIT before sending out COMWAKE */ - tmp = mvs_cr32(mvi, CMD_PHY_MODE_21); - tmp &= 0x0000ffff; - tmp |= 0x00fa0000; - mvs_cw32(mvi, CMD_PHY_MODE_21, tmp); - - tmp = mvs_cr32(mvi, CMD_PHY_TIMER); - tmp &= 0x1fffffff; - tmp |= (2U << 29); /* 8 ms retry */ - mvs_cw32(mvi, CMD_PHY_TIMER, tmp); -} - -static inline void mvs_int_sata(struct mvs_info *mvi) -{ - u32 tmp; - void __iomem *regs = mvi->regs; - tmp = mr32(MVS_INT_STAT_SRS_0); - if (tmp) - mw32(MVS_INT_STAT_SRS_0, tmp); - MVS_CHIP_DISP->clear_active_cmds(mvi); -} - -static inline void mvs_int_full(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - u32 tmp, stat; - int i; - - stat = mr32(MVS_INT_STAT); - mvs_int_rx(mvi, false); - - for (i = 0; i < mvi->chip->n_phy; i++) { - tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED); - if (tmp) - 
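
`mvs_phy_hacks()` above is a run of read-modify-write tweaks: fetch a register through the command window, clear a field, OR in the new value, write it back. The recurring shape, factored into a helper (names hypothetical; the real code goes through `mvs_cr32()`/`mvs_cw32()`):

```c
#include <stdint.h>

static uint32_t regfile[256];	/* stand-in for the chip's internal registers */

static uint32_t cr32(uint32_t addr)             { return regfile[addr % 256]; }
static void     cw32(uint32_t addr, uint32_t v) { regfile[addr % 256] = v; }

/* replace the bits selected by mask with (val & mask) */
static void rmw32(uint32_t addr, uint32_t mask, uint32_t val)
{
	uint32_t tmp = cr32(addr);

	tmp &= ~mask;
	tmp |= val & mask;
	cw32(addr, tmp);
}
```

The open-frame timeout tweak above, for instance, is `rmw32(CMD_SAS_CTL0, 0xffff, 0x3fff)` in this notation.
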
mvs_int_port(mvi, i, tmp); - } - - if (stat & CINT_SRS) - mvs_int_sata(mvi); - - mw32(MVS_INT_STAT, stat); -} - -static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx) -{ - void __iomem *regs = mvi->regs; - mw32(MVS_TX_PROD_IDX, tx); -} - -static inline u32 mvs_rx_update(struct mvs_info *mvi) -{ - void __iomem *regs = mvi->regs; - return mr32(MVS_RX_CONS_IDX); -} - -static inline u32 mvs_get_prd_size(void) -{ - return sizeof(struct mvs_prd); -} - -static inline u32 mvs_get_prd_count(void) -{ - return MAX_SG_ENTRY; -} - -static inline void mvs_show_pcie_usage(struct mvs_info *mvi) -{ - u16 link_stat, link_spd; - const char *spd[] = { - "UnKnown", - "2.5", - "5.0", - }; - if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0) - return; - - pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat); - link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS; - if (link_spd >= 3) - link_spd = 0; - dev_printk(KERN_INFO, mvi->dev, - "mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n", - (link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS, - spd[link_spd]); -} - -static inline u32 mvs_hw_max_link_rate(void) -{ - return MAX_LINK_RATE; -} - -#endif /* _MV_CHIPS_H_ */ - diff --git a/trunk/drivers/scsi/mvsas/mv_defs.h b/trunk/drivers/scsi/mvsas/mv_defs.h deleted file mode 100644 index f8cb9defb961..000000000000 --- a/trunk/drivers/scsi/mvsas/mv_defs.h +++ /dev/null @@ -1,502 +0,0 @@ -/* - * Marvell 88SE64xx/88SE94xx const head file - * - * Copyright 2007 Red Hat, Inc. - * Copyright 2008 Marvell. - * - * This file is licensed under GPLv2. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; version 2 of the - * License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA -*/ - -#ifndef _MV_DEFS_H_ -#define _MV_DEFS_H_ - - -enum chip_flavors { - chip_6320, - chip_6440, - chip_6485, - chip_9480, - chip_9180, -}; - -/* driver compile-time configuration */ -enum driver_configuration { - MVS_SLOTS = 512, /* command slots */ - MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */ - MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */ - /* software requires power-of-2 - ring size */ - MVS_SOC_SLOTS = 64, - MVS_SOC_TX_RING_SZ = MVS_SOC_SLOTS * 2, - MVS_SOC_RX_RING_SZ = MVS_SOC_SLOTS * 2, - - MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */ - MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */ - MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */ - MVS_OAF_SZ = 64, /* Open address frame buffer size */ - MVS_QUEUE_SIZE = 32, /* Support Queue depth */ - MVS_CAN_QUEUE = MVS_SLOTS - 2, /* SCSI Queue depth */ - MVS_SOC_CAN_QUEUE = MVS_SOC_SLOTS - 2, -}; - -/* unchangeable hardware details */ -enum hardware_details { - MVS_MAX_PHYS = 8, /* max. possible phys */ - MVS_MAX_PORTS = 8, /* max. 
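
`mvs_start_delivery()` and `mvs_rx_update()` above are the software halves of a producer/consumer ring: the driver publishes new TX descriptors by bumping `MVS_TX_PROD_IDX`, and the chip advances `MVS_RX_CONS_IDX` as completions land. Because the ring sizes defined above are powers of two, index wrap is a mask rather than a modulo. A sketch (sizes and names illustrative):

```c
#include <stdint.h>

#define TX_RING_SZ 1024u	/* power of two, like MVS_TX_RING_SZ */

struct tx_ring {
	uint32_t prod;		/* driver-owned producer index */
	uint32_t cons;		/* hardware-owned consumer index */
	uint64_t desc[TX_RING_SZ];
};

static int tx_post(struct tx_ring *r, uint64_t d)
{
	uint32_t next = (r->prod + 1) & (TX_RING_SZ - 1);

	if (next == r->cons)
		return -1;	/* ring full: one slot is kept empty */
	r->desc[r->prod] = d;
	r->prod = next;		/* then tell the chip: mw32(MVS_TX_PROD_IDX, next) */
	return 0;
}
```
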
possible ports */ - MVS_SOC_PHYS = 4, /* soc phys */ - MVS_SOC_PORTS = 4, /* soc phys */ - MVS_MAX_DEVICES = 1024, /* max supported device */ -}; - -/* peripheral registers (BAR2) */ -enum peripheral_registers { - SPI_CTL = 0x10, /* EEPROM control */ - SPI_CMD = 0x14, /* EEPROM command */ - SPI_DATA = 0x18, /* EEPROM data */ -}; - -enum peripheral_register_bits { - TWSI_RDY = (1U << 7), /* EEPROM interface ready */ - TWSI_RD = (1U << 4), /* EEPROM read access */ - - SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */ -}; - -enum hw_register_bits { - /* MVS_GBL_CTL */ - INT_EN = (1U << 1), /* Global int enable */ - HBA_RST = (1U << 0), /* HBA reset */ - - /* MVS_GBL_INT_STAT */ - INT_XOR = (1U << 4), /* XOR engine event */ - INT_SAS_SATA = (1U << 0), /* SAS/SATA event */ - - /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */ - SATA_TARGET = (1U << 16), /* port0 SATA target enable */ - MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */ - MODE_AUTO_DET_PORT6 = (1U << 14), - MODE_AUTO_DET_PORT5 = (1U << 13), - MODE_AUTO_DET_PORT4 = (1U << 12), - MODE_AUTO_DET_PORT3 = (1U << 11), - MODE_AUTO_DET_PORT2 = (1U << 10), - MODE_AUTO_DET_PORT1 = (1U << 9), - MODE_AUTO_DET_PORT0 = (1U << 8), - MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 | - MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 | - MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 | - MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7, - MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */ - MODE_SAS_PORT6_MASK = (1U << 6), - MODE_SAS_PORT5_MASK = (1U << 5), - MODE_SAS_PORT4_MASK = (1U << 4), - MODE_SAS_PORT3_MASK = (1U << 3), - MODE_SAS_PORT2_MASK = (1U << 2), - MODE_SAS_PORT1_MASK = (1U << 1), - MODE_SAS_PORT0_MASK = (1U << 0), - MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK | - MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK | - MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK | - MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK, - - /* SAS_MODE value may be - * dictated (in hw) by values - * of SATA_TARGET & AUTO_DET - */ - - /* MVS_TX_CFG */ - TX_EN = (1U << 16), /* Enable TX */ - TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */ - - /* MVS_RX_CFG */ - RX_EN = (1U << 16), /* Enable RX */ - RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */ - - /* MVS_INT_COAL */ - COAL_EN = (1U << 16), /* Enable int coalescing */ - - /* MVS_INT_STAT, MVS_INT_MASK */ - CINT_I2C = (1U << 31), /* I2C event */ - CINT_SW0 = (1U << 30), /* software event 0 */ - CINT_SW1 = (1U << 29), /* software event 1 */ - CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */ - CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */ - CINT_MEM = (1U << 26), /* int mem parity err */ - CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */ - CINT_SRS = (1U << 3), /* SRS event */ - CINT_CI_STOP = (1U << 1), /* cmd issue stopped */ - CINT_DONE = (1U << 0), /* cmd completion */ - - /* shl for ports 1-3 */ - CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */ - CINT_PORT = (1U << 8), /* port0 event */ - CINT_PORT_MASK_OFFSET = 8, - CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET), - CINT_PHY_MASK_OFFSET = 4, - CINT_PHY_MASK = (0x0F << CINT_PHY_MASK_OFFSET), - - /* TX (delivery) ring bits */ - TXQ_CMD_SHIFT = 29, - TXQ_CMD_SSP = 1, /* SSP protocol */ - TXQ_CMD_SMP = 2, /* SMP protocol */ - TXQ_CMD_STP = 3, /* STP/SATA protocol */ - TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */ - TXQ_CMD_SLOT_RESET = 7, /* reset command slot */ - TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */ - TXQ_MODE_TARGET = 0, - TXQ_MODE_INITIATOR = 1, - TXQ_PRIO_HI = (1U << 27), /* 
priority: 0=normal, 1=high */ - TXQ_PRI_NORMAL = 0, - TXQ_PRI_HIGH = 1, - TXQ_SRS_SHIFT = 20, /* SATA register set */ - TXQ_SRS_MASK = 0x7f, - TXQ_PHY_SHIFT = 12, /* PHY bitmap */ - TXQ_PHY_MASK = 0xff, - TXQ_SLOT_MASK = 0xfff, /* slot number */ - - /* RX (completion) ring bits */ - RXQ_GOOD = (1U << 23), /* Response good */ - RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */ - RXQ_CMD_RX = (1U << 20), /* target cmd received */ - RXQ_ATTN = (1U << 19), /* attention */ - RXQ_RSP = (1U << 18), /* response frame xfer'd */ - RXQ_ERR = (1U << 17), /* err info rec xfer'd */ - RXQ_DONE = (1U << 16), /* cmd complete */ - RXQ_SLOT_MASK = 0xfff, /* slot number */ - - /* mvs_cmd_hdr bits */ - MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */ - MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */ - - /* SSP initiator only */ - MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */ - - /* SSP initiator or target */ - MCH_SSP_FR_TASK = 0x1, /* TASK frame */ - - /* SSP target only */ - MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */ - MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */ - MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */ - MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */ - - MCH_SSP_MODE_PASSTHRU = 1, - MCH_SSP_MODE_NORMAL = 0, - MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */ - MCH_FBURST = (1U << 11), /* first burst (SSP) */ - MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */ - MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */ - MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */ - MCH_RESET = (1U << 7), /* Reset (STP/SATA) */ - MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */ - MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */ - MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */ - MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/ - - CCTL_RST = (1U << 5), /* port logic reset */ - - /* 0(LSB first), 1(MSB first) */ - CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */ - CCTL_ENDIAN_RSP = (1U << 2), /* response frame */ - CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */ - CCTL_ENDIAN_CMD = (1U << 0), /* command table */ - - /* MVS_Px_SER_CTLSTAT (per-phy control) */ - PHY_SSP_RST = (1U << 3), /* reset SSP link layer */ - PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */ - PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */ - PHY_RST = (1U << 0), /* phy reset */ - PHY_READY_MASK = (1U << 20), - - /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */ - PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */ - PHYEV_DCDR_ERR = (1U << 23), /* STP Deocder Error */ - PHYEV_CRC_ERR = (1U << 22), /* STP CRC Error */ - PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */ - PHYEV_AN = (1U << 18), /* SATA async notification */ - PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */ - PHYEV_SIG_FIS = (1U << 16), /* signature FIS */ - PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */ - PHYEV_IU_BIG = (1U << 11), /* IU too long err */ - PHYEV_IU_SMALL = (1U << 10), /* IU too short err */ - PHYEV_UNK_TAG = (1U << 9), /* unknown tag */ - PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */ - PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */ - PHYEV_PORT_SEL = (1U << 6), /* port selector present */ - PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */ - PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */ - PHYEV_ID_FAIL = (1U << 3), /* identify failed */ - PHYEV_ID_DONE = (1U << 2), /* identify done */ - PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */ - PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */ - - /* MVS_PCS */ - PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA 
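
The `TXQ_*`/`RXQ_*` values above describe how a delivery-queue entry packs several fields into one 32-bit word: a 3-bit command code at bit 29, mode and priority flags, a PHY bitmap, and a 12-bit slot number. Packing and unpacking is plain shift-and-mask work, sketched here (field layout taken from the enum above, helper names invented):

```c
#include <stdint.h>

#define TXQ_CMD_SHIFT	29
#define TXQ_MODE_I	(1u << 28)
#define TXQ_PHY_SHIFT	12
#define TXQ_PHY_MASK	0xffu
#define TXQ_SLOT_MASK	0xfffu

static uint32_t txq_entry(uint32_t cmd, int initiator,
			  uint32_t phy_map, uint32_t slot)
{
	uint32_t v = (cmd << TXQ_CMD_SHIFT)
		   | ((phy_map & TXQ_PHY_MASK) << TXQ_PHY_SHIFT)
		   | (slot & TXQ_SLOT_MASK);

	if (initiator)
		v |= TXQ_MODE_I;
	return v;
}

/* inverse for one field, as the completion path does with RXQ_SLOT_MASK */
static uint32_t txq_slot(uint32_t entry)
{
	return entry & TXQ_SLOT_MASK;
}
```
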
Register Set */ - PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */ - PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6485 */ - PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */ - PCS_RSP_RX_EN = (1U << 7), /* raw response rx */ - PCS_SATA_RETRY_2 = (1U << 6), /* For 9180 */ - PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */ - PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */ - PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */ - PCS_CMD_RST = (1U << 1), /* reset cmd issue */ - PCS_CMD_EN = (1U << 0), /* enable cmd issue */ - - /* Port n Attached Device Info */ - PORT_DEV_SSP_TRGT = (1U << 19), - PORT_DEV_SMP_TRGT = (1U << 18), - PORT_DEV_STP_TRGT = (1U << 17), - PORT_DEV_SSP_INIT = (1U << 11), - PORT_DEV_SMP_INIT = (1U << 10), - PORT_DEV_STP_INIT = (1U << 9), - PORT_PHY_ID_MASK = (0xFFU << 24), - PORT_SSP_TRGT_MASK = (0x1U << 19), - PORT_SSP_INIT_MASK = (0x1U << 11), - PORT_DEV_TRGT_MASK = (0x7U << 17), - PORT_DEV_INIT_MASK = (0x7U << 9), - PORT_DEV_TYPE_MASK = (0x7U << 0), - - /* Port n PHY Status */ - PHY_RDY = (1U << 2), - PHY_DW_SYNC = (1U << 1), - PHY_OOB_DTCTD = (1U << 0), - - /* VSR */ - /* PHYMODE 6 (CDB) */ - PHY_MODE6_LATECLK = (1U << 29), /* Lock Clock */ - PHY_MODE6_DTL_SPEED = (1U << 27), /* Digital Loop Speed */ - PHY_MODE6_FC_ORDER = (1U << 26), /* Fibre Channel Mode Order*/ - PHY_MODE6_MUCNT_EN = (1U << 24), /* u Count Enable */ - PHY_MODE6_SEL_MUCNT_LEN = (1U << 22), /* Training Length Select */ - PHY_MODE6_SELMUPI = (1U << 20), /* Phase Multi Select (init) */ - PHY_MODE6_SELMUPF = (1U << 18), /* Phase Multi Select (final) */ - PHY_MODE6_SELMUFF = (1U << 16), /* Freq Loop Multi Sel(final) */ - PHY_MODE6_SELMUFI = (1U << 14), /* Freq Loop Multi Sel(init) */ - PHY_MODE6_FREEZE_LOOP = (1U << 12), /* Freeze Rx CDR Loop */ - PHY_MODE6_INT_RXFOFFS = (1U << 3), /* Rx CDR Freq Loop Enable */ - PHY_MODE6_FRC_RXFOFFS = (1U << 2), /* Initial Rx CDR Offset */ - PHY_MODE6_STAU_0D8 = (1U << 1), /* Rx CDR Freq Loop Saturate */ - PHY_MODE6_RXSAT_DIS = (1U << 0), /* Saturate Ctl */ -}; - -/* SAS/SATA configuration port registers, aka phy registers */ -enum sas_sata_config_port_regs { - PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */ - PHYR_ADDR_LO = 0x04, /* my SAS address (low) */ - PHYR_ADDR_HI = 0x08, /* my SAS address (high) */ - PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */ - PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */ - PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */ - PHYR_SATA_CTL = 0x18, /* SATA control */ - PHYR_PHY_STAT = 0x1C, /* PHY status */ - PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */ - PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */ - PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */ - PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */ - PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */ - PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */ - PHYR_WIDE_PORT = 0x38, /* wide port participating */ - PHYR_CURRENT0 = 0x80, /* current connection info 0 */ - PHYR_CURRENT1 = 0x84, /* current connection info 1 */ - PHYR_CURRENT2 = 0x88, /* current connection info 2 */ - CONFIG_ID_FRAME0 = 0x100, /* Port device ID frame register 0 */ - CONFIG_ID_FRAME1 = 0x104, /* Port device ID frame register 1 */ - CONFIG_ID_FRAME2 = 0x108, /* Port device ID frame register 2 */ - CONFIG_ID_FRAME3 = 0x10c, /* Port device ID frame register 3 */ - CONFIG_ID_FRAME4 = 0x110, /* Port device ID frame register 4 */ - CONFIG_ID_FRAME5 = 0x114, /* Port device ID frame register 5 
*/ - CONFIG_ID_FRAME6 = 0x118, /* Port device ID frame register 6 */ - CONFIG_ATT_ID_FRAME0 = 0x11c, /* attached ID frame register 0 */ - CONFIG_ATT_ID_FRAME1 = 0x120, /* attached ID frame register 1 */ - CONFIG_ATT_ID_FRAME2 = 0x124, /* attached ID frame register 2 */ - CONFIG_ATT_ID_FRAME3 = 0x128, /* attached ID frame register 3 */ - CONFIG_ATT_ID_FRAME4 = 0x12c, /* attached ID frame register 4 */ - CONFIG_ATT_ID_FRAME5 = 0x130, /* attached ID frame register 5 */ - CONFIG_ATT_ID_FRAME6 = 0x134, /* attached ID frame register 6 */ -}; - -enum sas_cmd_port_registers { - CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */ - CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */ - CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */ - CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */ - CMD_OOB_SPACE = 0x110, /* OOB space control register */ - CMD_OOB_BURST = 0x114, /* OOB burst control register */ - CMD_PHY_TIMER = 0x118, /* PHY timer control register */ - CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */ - CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */ - CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */ - CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */ - CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */ - CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */ - CMD_ID_TEST = 0x134, /* ID test register */ - CMD_PL_TIMER = 0x138, /* PL timer register */ - CMD_WD_TIMER = 0x13c, /* WD timer register */ - CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */ - CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */ - CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */ - CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */ - CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */ - CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */ - CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */ - CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */ - CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */ - CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memroy BIST Status */ - CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */ - CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */ - CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */ - CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */ - CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */ - CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */ - CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */ - CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */ - CMD_RESET_COUNT = 0x188, /* Reset Count */ - CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */ - CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */ - CMD_PHY_CTL = 0x194, /* PHY Control and Status */ - CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */ - CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */ - CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */ - CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */ - CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */ - CMD_HOST_CTL = 0x1AC, /* Host Control Status */ - CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */ - CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */ - CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */ - CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */ - CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */ - CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */ -}; - -enum mvs_info_flags { - MVF_MSI = (1U << 0), /* MSI is enabled */ - MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */ - 
MVF_FLAG_SOC = (1U << 2), /* SoC integrated controllers */ -}; - -enum mvs_event_flags { - PHY_PLUG_EVENT = (3U), - PHY_PLUG_IN = (1U << 0), /* phy plug in */ - PHY_PLUG_OUT = (1U << 1), /* phy plug out */ -}; - -enum mvs_port_type { - PORT_TGT_MASK = (1U << 5), - PORT_INIT_PORT = (1U << 4), - PORT_TGT_PORT = (1U << 3), - PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT), - PORT_TYPE_SAS = (1U << 1), - PORT_TYPE_SATA = (1U << 0), -}; - -/* Command Table Format */ -enum ct_format { - /* SSP */ - SSP_F_H = 0x00, - SSP_F_IU = 0x18, - SSP_F_MAX = 0x4D, - /* STP */ - STP_CMD_FIS = 0x00, - STP_ATAPI_CMD = 0x40, - STP_F_MAX = 0x10, - /* SMP */ - SMP_F_T = 0x00, - SMP_F_DEP = 0x01, - SMP_F_MAX = 0x101, -}; - -enum status_buffer { - SB_EIR_OFF = 0x00, /* Error Information Record */ - SB_RFB_OFF = 0x08, /* Response Frame Buffer */ - SB_RFB_MAX = 0x400, /* RFB size*/ -}; - -enum error_info_rec { - CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */ - CMD_PI_ERR = (1U << 30), /* Protection info error. see flags2 */ - RSP_OVER = (1U << 29), /* rsp buffer overflow */ - RETRY_LIM = (1U << 28), /* FIS/frame retry limit exceeded */ - UNK_FIS = (1U << 27), /* unknown FIS */ - DMA_TERM = (1U << 26), /* DMA terminate primitive rx'd */ - SYNC_ERR = (1U << 25), /* SYNC rx'd during frame xmit */ - TFILE_ERR = (1U << 24), /* SATA taskfile Error bit set */ - R_ERR = (1U << 23), /* SATA returned R_ERR prim */ - RD_OFS = (1U << 20), /* Read DATA frame invalid offset */ - XFER_RDY_OFS = (1U << 19), /* XFER_RDY offset error */ - UNEXP_XFER_RDY = (1U << 18), /* unexpected XFER_RDY error */ - DATA_OVER_UNDER = (1U << 16), /* data overflow/underflow */ - INTERLOCK = (1U << 15), /* interlock error */ - NAK = (1U << 14), /* NAK rx'd */ - ACK_NAK_TO = (1U << 13), /* ACK/NAK timeout */ - CXN_CLOSED = (1U << 12), /* cxn closed w/out ack/nak */ - OPEN_TO = (1U << 11), /* I_T nexus lost, open cxn timeout */ - PATH_BLOCKED = (1U << 10), /* I_T nexus lost, pathway blocked */ - NO_DEST = (1U << 9), /* I_T nexus lost, no destination */ - STP_RES_BSY = (1U << 8), /* STP resources busy */ - BREAK = (1U << 7), /* break received */ - BAD_DEST = (1U << 6), /* bad destination */ - BAD_PROTO = (1U << 5), /* protocol not supported */ - BAD_RATE = (1U << 4), /* cxn rate not supported */ - WRONG_DEST = (1U << 3), /* wrong destination error */ - CREDIT_TO = (1U << 2), /* credit timeout */ - WDOG_TO = (1U << 1), /* watchdog timeout */ - BUF_PAR = (1U << 0), /* buffer parity error */ -}; - -enum error_info_rec_2 { - SLOT_BSY_ERR = (1U << 31), /* Slot Busy Error */ - GRD_CHK_ERR = (1U << 14), /* Guard Check Error */ - APP_CHK_ERR = (1U << 13), /* Application Check error */ - REF_CHK_ERR = (1U << 12), /* Reference Check Error */ - USR_BLK_NM = (1U << 0), /* User Block Number */ -}; - -enum pci_cfg_register_bits { - PCTL_PWR_OFF = (0xFU << 24), - PCTL_COM_ON = (0xFU << 20), - PCTL_LINK_RST = (0xFU << 16), - PCTL_LINK_OFFS = (16), - PCTL_PHY_DSBL = (0xFU << 12), - PCTL_PHY_DSBL_OFFS = (12), - PRD_REQ_SIZE = (0x4000), - PRD_REQ_MASK = (0x00007000), - PLS_NEG_LINK_WD = (0x3FU << 4), - PLS_NEG_LINK_WD_OFFS = 4, - PLS_LINK_SPD = (0x0FU << 0), - PLS_LINK_SPD_OFFS = 0, -}; - -enum open_frame_protocol { - PROTOCOL_SMP = 0x0, - PROTOCOL_SSP = 0x1, - PROTOCOL_STP = 0x2, -}; - -/* define for response frame datapres field */ -enum datapres_field { - NO_DATA = 0, - RESPONSE_DATA = 1, - SENSE_DATA = 2, -}; - -/* define task management IU */ -struct mvs_tmf_task{ - u8 tmf; - u16 tag_of_task_to_be_managed; -}; -#endif diff --git 
a/trunk/drivers/scsi/mvsas/mv_init.c b/trunk/drivers/scsi/mvsas/mv_init.c deleted file mode 100644 index 8646a19f999d..000000000000 --- a/trunk/drivers/scsi/mvsas/mv_init.c +++ /dev/null @@ -1,703 +0,0 @@ -/* - * Marvell 88SE64xx/88SE94xx pci init - * - * Copyright 2007 Red Hat, Inc. - * Copyright 2008 Marvell. - * - * This file is licensed under GPLv2. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; version 2 of the - * License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA -*/ - - -#include "mv_sas.h" - -static struct scsi_transport_template *mvs_stt; -static const struct mvs_chip_info mvs_chips[] = { - [chip_6320] = { 1, 2, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, - [chip_6440] = { 1, 4, 0x400, 17, 16, 9, &mvs_64xx_dispatch, }, - [chip_6485] = { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, }, - [chip_9180] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, - [chip_9480] = { 2, 4, 0x800, 17, 64, 9, &mvs_94xx_dispatch, }, -}; - -#define SOC_SAS_NUM 2 - -static struct scsi_host_template mvs_sht = { - .module = THIS_MODULE, - .name = DRV_NAME, - .queuecommand = sas_queuecommand, - .target_alloc = sas_target_alloc, - .slave_configure = mvs_slave_configure, - .slave_destroy = sas_slave_destroy, - .scan_finished = mvs_scan_finished, - .scan_start = mvs_scan_start, - .change_queue_depth = sas_change_queue_depth, - .change_queue_type = sas_change_queue_type, - .bios_param = sas_bios_param, - .can_queue = 1, - .cmd_per_lun = 1, - .this_id = -1, - .sg_tablesize = SG_ALL, - .max_sectors = SCSI_DEFAULT_MAX_SECTORS, - .use_clustering = ENABLE_CLUSTERING, - .eh_device_reset_handler = sas_eh_device_reset_handler, - .eh_bus_reset_handler = sas_eh_bus_reset_handler, - .slave_alloc = mvs_slave_alloc, - .target_destroy = sas_target_destroy, - .ioctl = sas_ioctl, -}; - -static struct sas_domain_function_template mvs_transport_ops = { - .lldd_dev_found = mvs_dev_found, - .lldd_dev_gone = mvs_dev_gone, - - .lldd_execute_task = mvs_queue_command, - .lldd_control_phy = mvs_phy_control, - - .lldd_abort_task = mvs_abort_task, - .lldd_abort_task_set = mvs_abort_task_set, - .lldd_clear_aca = mvs_clear_aca, - .lldd_clear_task_set = mvs_clear_task_set, - .lldd_I_T_nexus_reset = mvs_I_T_nexus_reset, - .lldd_lu_reset = mvs_lu_reset, - .lldd_query_task = mvs_query_task, - - .lldd_port_formed = mvs_port_formed, - .lldd_port_deformed = mvs_port_deformed, - -}; - -static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id) -{ - struct mvs_phy *phy = &mvi->phy[phy_id]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - - phy->mvi = mvi; - init_timer(&phy->timer); - sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 
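
The `mvs_chips[]` table above is indexed by the `chip_flavors` enum that each PCI ID stashes in `driver_data`, so the probe path never branches on device IDs; it just dereferences the table. A standalone sketch of the lookup (fields abbreviated and hypothetical; the real `mvs_chip_info` carries more layout constants):

```c
#include <stdio.h>

enum flavor { chip_a, chip_b, nr_flavors };

struct chip_info {
	int n_host;	/* controller cores */
	int n_phy;	/* phys per core */
	int fis_offs;	/* per-chip layout constant */
};

static const struct chip_info chips[nr_flavors] = {
	[chip_a] = { 1, 4, 0x400 },
	[chip_b] = { 2, 4, 0x800 },
};

int main(void)
{
	unsigned long driver_data = chip_b;	/* as stashed in the PCI ID table */
	const struct chip_info *chip = &chips[driver_data];

	printf("%d core(s), %d phys each\n", chip->n_host, chip->n_phy);
	return 0;
}
```
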
1 : 0; - sas_phy->class = SAS; - sas_phy->iproto = SAS_PROTOCOL_ALL; - sas_phy->tproto = 0; - sas_phy->type = PHY_TYPE_PHYSICAL; - sas_phy->role = PHY_ROLE_INITIATOR; - sas_phy->oob_mode = OOB_NOT_CONNECTED; - sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; - - sas_phy->id = phy_id; - sas_phy->sas_addr = &mvi->sas_addr[0]; - sas_phy->frame_rcvd = &phy->frame_rcvd[0]; - sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata; - sas_phy->lldd_phy = phy; -} - -static void mvs_free(struct mvs_info *mvi) -{ - int i; - struct mvs_wq *mwq; - int slot_nr; - - if (!mvi) - return; - - if (mvi->flags & MVF_FLAG_SOC) - slot_nr = MVS_SOC_SLOTS; - else - slot_nr = MVS_SLOTS; - - for (i = 0; i < mvi->tags_num; i++) { - struct mvs_slot_info *slot = &mvi->slot_info[i]; - if (slot->buf) - dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ, - slot->buf, slot->buf_dma); - } - - if (mvi->tx) - dma_free_coherent(mvi->dev, - sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, - mvi->tx, mvi->tx_dma); - if (mvi->rx_fis) - dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ, - mvi->rx_fis, mvi->rx_fis_dma); - if (mvi->rx) - dma_free_coherent(mvi->dev, - sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), - mvi->rx, mvi->rx_dma); - if (mvi->slot) - dma_free_coherent(mvi->dev, - sizeof(*mvi->slot) * slot_nr, - mvi->slot, mvi->slot_dma); -#ifndef DISABLE_HOTPLUG_DMA_FIX - if (mvi->bulk_buffer) - dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE, - mvi->bulk_buffer, mvi->bulk_buffer_dma); -#endif - - MVS_CHIP_DISP->chip_iounmap(mvi); - if (mvi->shost) - scsi_host_put(mvi->shost); - list_for_each_entry(mwq, &mvi->wq_list, entry) - cancel_delayed_work(&mwq->work_q); - kfree(mvi); -} - -#ifdef MVS_USE_TASKLET -struct tasklet_struct mv_tasklet; -static void mvs_tasklet(unsigned long opaque) -{ - unsigned long flags; - u32 stat; - u16 core_nr, i = 0; - - struct mvs_info *mvi; - struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque; - - core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; - mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; - - if (unlikely(!mvi)) - BUG_ON(1); - - for (i = 0; i < core_nr; i++) { - mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; - stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq); - if (stat) - MVS_CHIP_DISP->isr(mvi, mvi->irq, stat); - } - -} -#endif - -static irqreturn_t mvs_interrupt(int irq, void *opaque) -{ - u32 core_nr, i = 0; - u32 stat; - struct mvs_info *mvi; - struct sas_ha_struct *sha = opaque; - - core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; - mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; - - if (unlikely(!mvi)) - return IRQ_NONE; - - stat = MVS_CHIP_DISP->isr_status(mvi, irq); - if (!stat) - return IRQ_NONE; - -#ifdef MVS_USE_TASKLET - tasklet_schedule(&mv_tasklet); -#else - for (i = 0; i < core_nr; i++) { - mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; - MVS_CHIP_DISP->isr(mvi, irq, stat); - } -#endif - return IRQ_HANDLED; -} - -static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) -{ - int i, slot_nr; - - if (mvi->flags & MVF_FLAG_SOC) - slot_nr = MVS_SOC_SLOTS; - else - slot_nr = MVS_SLOTS; - - spin_lock_init(&mvi->lock); - for (i = 0; i < mvi->chip->n_phy; i++) { - mvs_phy_init(mvi, i); - mvi->port[i].wide_port_phymap = 0; - mvi->port[i].port_attached = 0; - INIT_LIST_HEAD(&mvi->port[i].list); - } - for (i = 0; i < MVS_MAX_DEVICES; i++) { - mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED; - mvi->devices[i].dev_type = NO_DEVICE; - mvi->devices[i].device_id = i; - mvi->devices[i].dev_status = MVS_DEV_NORMAL; - } - - /* - * alloc and init our DMA areas - */ - 
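
`mvs_free()` above and the allocation run that follows are a matched pair: each DMA area is acquired in sequence, any failure takes a single error exit, and the free path tests every pointer so it can run safely against a partially built `mvs_info`. The shape in plain C, with `calloc` standing in for `dma_alloc_coherent` and the struct invented for illustration:

```c
#include <stdlib.h>
#include <string.h>

struct areas { void *tx, *rx, *slot; };

static void areas_free(struct areas *a)
{
	free(a->tx);	/* free(NULL) is a no-op, so partial */
	free(a->rx);	/* allocations unwind without extra */
	free(a->slot);	/* bookkeeping */
}

static int areas_alloc(struct areas *a, size_t sz)
{
	memset(a, 0, sizeof(*a));	/* all-NULL: safe to free */
	if (!(a->tx   = calloc(1, sz))) goto err;
	if (!(a->rx   = calloc(1, sz))) goto err;
	if (!(a->slot = calloc(1, sz))) goto err;
	return 0;
err:
	areas_free(a);
	return -1;
}
```
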
mvi->tx = dma_alloc_coherent(mvi->dev, - sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ, - &mvi->tx_dma, GFP_KERNEL); - if (!mvi->tx) - goto err_out; - memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ); - mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ, - &mvi->rx_fis_dma, GFP_KERNEL); - if (!mvi->rx_fis) - goto err_out; - memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ); - - mvi->rx = dma_alloc_coherent(mvi->dev, - sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1), - &mvi->rx_dma, GFP_KERNEL); - if (!mvi->rx) - goto err_out; - memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1)); - mvi->rx[0] = cpu_to_le32(0xfff); - mvi->rx_cons = 0xfff; - - mvi->slot = dma_alloc_coherent(mvi->dev, - sizeof(*mvi->slot) * slot_nr, - &mvi->slot_dma, GFP_KERNEL); - if (!mvi->slot) - goto err_out; - memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr); - -#ifndef DISABLE_HOTPLUG_DMA_FIX - mvi->bulk_buffer = dma_alloc_coherent(mvi->dev, - TRASH_BUCKET_SIZE, - &mvi->bulk_buffer_dma, GFP_KERNEL); - if (!mvi->bulk_buffer) - goto err_out; -#endif - for (i = 0; i < slot_nr; i++) { - struct mvs_slot_info *slot = &mvi->slot_info[i]; - - slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ, - &slot->buf_dma, GFP_KERNEL); - if (!slot->buf) { - printk(KERN_DEBUG"failed to allocate slot->buf.\n"); - goto err_out; - } - memset(slot->buf, 0, MVS_SLOT_BUF_SZ); - ++mvi->tags_num; - } - /* Initialize tags */ - mvs_tag_init(mvi); - return 0; -err_out: - return 1; -} - - -int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex) -{ - unsigned long res_start, res_len, res_flag, res_flag_ex = 0; - struct pci_dev *pdev = mvi->pdev; - if (bar_ex != -1) { - /* - * ioremap main and peripheral registers - */ - res_start = pci_resource_start(pdev, bar_ex); - res_len = pci_resource_len(pdev, bar_ex); - if (!res_start || !res_len) - goto err_out; - - res_flag_ex = pci_resource_flags(pdev, bar_ex); - if (res_flag_ex & IORESOURCE_MEM) { - if (res_flag_ex & IORESOURCE_CACHEABLE) - mvi->regs_ex = ioremap(res_start, res_len); - else - mvi->regs_ex = ioremap_nocache(res_start, - res_len); - } else - mvi->regs_ex = (void *)res_start; - if (!mvi->regs_ex) - goto err_out; - } - - res_start = pci_resource_start(pdev, bar); - res_len = pci_resource_len(pdev, bar); - if (!res_start || !res_len) - goto err_out; - - res_flag = pci_resource_flags(pdev, bar); - if (res_flag & IORESOURCE_CACHEABLE) - mvi->regs = ioremap(res_start, res_len); - else - mvi->regs = ioremap_nocache(res_start, res_len); - - if (!mvi->regs) { - if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM)) - iounmap(mvi->regs_ex); - mvi->regs_ex = NULL; - goto err_out; - } - - return 0; -err_out: - return -1; -} - -void mvs_iounmap(void __iomem *regs) -{ - iounmap(regs); -} - -static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev, - const struct pci_device_id *ent, - struct Scsi_Host *shost, unsigned int id) -{ - struct mvs_info *mvi; - struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); - - mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info), - GFP_KERNEL); - if (!mvi) - return NULL; - - mvi->pdev = pdev; - mvi->dev = &pdev->dev; - mvi->chip_id = ent->driver_data; - mvi->chip = &mvs_chips[mvi->chip_id]; - INIT_LIST_HEAD(&mvi->wq_list); - mvi->irq = pdev->irq; - - ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi; - ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy; - - mvi->id = id; - mvi->sas = sha; - mvi->shost = shost; -#ifdef MVS_USE_TASKLET - tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha); -#endif - - if 
(MVS_CHIP_DISP->chip_ioremap(mvi)) - goto err_out; - if (!mvs_alloc(mvi, shost)) - return mvi; -err_out: - mvs_free(mvi); - return NULL; -} - -/* move to PCI layer or libata core? */ -static int pci_go_64(struct pci_dev *pdev) -{ - int rc; - - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (rc) { - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "64-bit DMA enable failed\n"); - return rc; - } - } - } else { - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "32-bit DMA enable failed\n"); - return rc; - } - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) { - dev_printk(KERN_ERR, &pdev->dev, - "32-bit consistent DMA enable failed\n"); - return rc; - } - } - - return rc; -} - -static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost, - const struct mvs_chip_info *chip_info) -{ - int phy_nr, port_nr; unsigned short core_nr; - struct asd_sas_phy **arr_phy; - struct asd_sas_port **arr_port; - struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); - - core_nr = chip_info->n_host; - phy_nr = core_nr * chip_info->n_phy; - port_nr = phy_nr; - - memset(sha, 0x00, sizeof(struct sas_ha_struct)); - arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL); - arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL); - if (!arr_phy || !arr_port) - goto exit_free; - - sha->sas_phy = arr_phy; - sha->sas_port = arr_port; - - sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL); - if (!sha->lldd_ha) - goto exit_free; - - ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr; - - shost->transportt = mvs_stt; - shost->max_id = 128; - shost->max_lun = ~0; - shost->max_channel = 1; - shost->max_cmd_len = 16; - - return 0; -exit_free: - kfree(arr_phy); - kfree(arr_port); - return -1; - -} - -static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost, - const struct mvs_chip_info *chip_info) -{ - int can_queue, i = 0, j = 0; - struct mvs_info *mvi = NULL; - struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); - unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; - - for (j = 0; j < nr_core; j++) { - mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; - for (i = 0; i < chip_info->n_phy; i++) { - sha->sas_phy[j * chip_info->n_phy + i] = - &mvi->phy[i].sas_phy; - sha->sas_port[j * chip_info->n_phy + i] = - &mvi->port[i].sas_port; - } - } - - sha->sas_ha_name = DRV_NAME; - sha->dev = mvi->dev; - sha->lldd_module = THIS_MODULE; - sha->sas_addr = &mvi->sas_addr[0]; - - sha->num_phys = nr_core * chip_info->n_phy; - - sha->lldd_max_execute_num = 1; - - if (mvi->flags & MVF_FLAG_SOC) - can_queue = MVS_SOC_CAN_QUEUE; - else - can_queue = MVS_CAN_QUEUE; - - sha->lldd_queue_size = can_queue; - shost->can_queue = can_queue; - mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys; - sha->core.shost = mvi->shost; -} - -static void mvs_init_sas_add(struct mvs_info *mvi) -{ - u8 i; - for (i = 0; i < mvi->chip->n_phy; i++) { - mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL; - mvi->phy[i].dev_sas_addr = - cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr)); - } - - memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE); -} - -static int __devinit mvs_pci_init(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - unsigned int rc, nhost = 0; - struct mvs_info *mvi; - irq_handler_t irq_handler = mvs_interrupt; - struct Scsi_Host *shost = NULL; - const struct mvs_chip_info *chip; - - 
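
`pci_go_64()` above is the usual DMA-mask ladder: ask for 64-bit streaming and coherent masks, and step down to 32-bit whenever the platform or device refuses. The decision tree, reduced to a sketch with stand-ins for `pci_set_dma_mask()`/`pci_set_consistent_dma_mask()`:

```c
#include <stdio.h>

/* stand-ins: return 0 on success, -1 when the mask is refused */
static int set_streaming_mask(int bits) { return bits <= 64 ? 0 : -1; }
static int set_coherent_mask(int bits)  { return bits <= 32 ? 0 : -1; }

static int dma_setup(void)
{
	if (set_streaming_mask(64) == 0) {
		/* streaming may be 64-bit even if coherent allocs cannot be */
		if (set_coherent_mask(64) == 0 || set_coherent_mask(32) == 0)
			return 0;
		return -1;
	}
	/* no 64-bit at all: both masks must accept 32-bit */
	if (set_streaming_mask(32) || set_coherent_mask(32))
		return -1;
	return 0;
}
```
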
dev_printk(KERN_INFO, &pdev->dev, - "mvsas: driver version %s\n", DRV_VERSION); - rc = pci_enable_device(pdev); - if (rc) - goto err_out_enable; - - pci_set_master(pdev); - - rc = pci_request_regions(pdev, DRV_NAME); - if (rc) - goto err_out_disable; - - rc = pci_go_64(pdev); - if (rc) - goto err_out_regions; - - shost = scsi_host_alloc(&mvs_sht, sizeof(void *)); - if (!shost) { - rc = -ENOMEM; - goto err_out_regions; - } - - chip = &mvs_chips[ent->driver_data]; - SHOST_TO_SAS_HA(shost) = - kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL); - if (!SHOST_TO_SAS_HA(shost)) { - kfree(shost); - rc = -ENOMEM; - goto err_out_regions; - } - - rc = mvs_prep_sas_ha_init(shost, chip); - if (rc) { - kfree(shost); - rc = -ENOMEM; - goto err_out_regions; - } - - pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); - - do { - mvi = mvs_pci_alloc(pdev, ent, shost, nhost); - if (!mvi) { - rc = -ENOMEM; - goto err_out_regions; - } - - mvs_init_sas_add(mvi); - - mvi->instance = nhost; - rc = MVS_CHIP_DISP->chip_init(mvi); - if (rc) { - mvs_free(mvi); - goto err_out_regions; - } - nhost++; - } while (nhost < chip->n_host); - - mvs_post_sas_ha_init(shost, chip); - - rc = scsi_add_host(shost, &pdev->dev); - if (rc) - goto err_out_shost; - - rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); - if (rc) - goto err_out_shost; - rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, - DRV_NAME, SHOST_TO_SAS_HA(shost)); - if (rc) - goto err_not_sas; - - MVS_CHIP_DISP->interrupt_enable(mvi); - - scsi_scan_host(mvi->shost); - - return 0; - -err_not_sas: - sas_unregister_ha(SHOST_TO_SAS_HA(shost)); -err_out_shost: - scsi_remove_host(mvi->shost); -err_out_regions: - pci_release_regions(pdev); -err_out_disable: - pci_disable_device(pdev); -err_out_enable: - return rc; -} - -static void __devexit mvs_pci_remove(struct pci_dev *pdev) -{ - unsigned short core_nr, i = 0; - struct sas_ha_struct *sha = pci_get_drvdata(pdev); - struct mvs_info *mvi = NULL; - - core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; - mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0]; - -#ifdef MVS_USE_TASKLET - tasklet_kill(&mv_tasklet); -#endif - - pci_set_drvdata(pdev, NULL); - sas_unregister_ha(sha); - sas_remove_host(mvi->shost); - scsi_remove_host(mvi->shost); - - MVS_CHIP_DISP->interrupt_disable(mvi); - free_irq(mvi->irq, sha); - for (i = 0; i < core_nr; i++) { - mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i]; - mvs_free(mvi); - } - kfree(sha->sas_phy); - kfree(sha->sas_port); - kfree(sha); - pci_release_regions(pdev); - pci_disable_device(pdev); - return; -} - -static struct pci_device_id __devinitdata mvs_pci_table[] = { - { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 }, - { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 }, - { - .vendor = PCI_VENDOR_ID_MARVELL, - .device = 0x6440, - .subvendor = PCI_ANY_ID, - .subdevice = 0x6480, - .class = 0, - .class_mask = 0, - .driver_data = chip_6485, - }, - { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 }, - { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 }, - { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 }, - { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 }, - - { } /* terminate list */ -}; - -static struct pci_driver mvs_pci_driver = { - .name = DRV_NAME, - .id_table = mvs_pci_table, - .probe = mvs_pci_init, - .remove = __devexit_p(mvs_pci_remove), -}; - -/* task handler */ -struct task_struct *mvs_th; -static int __init mvs_init(void) -{ - int rc; - mvs_stt = sas_domain_attach_transport(&mvs_transport_ops); - if (!mvs_stt) - return -ENOMEM; - - rc = pci_register_driver(&mvs_pci_driver); - - if (rc) - goto err_out; - - return 
0; - -err_out: - sas_release_transport(mvs_stt); - return rc; -} - -static void __exit mvs_exit(void) -{ - pci_unregister_driver(&mvs_pci_driver); - sas_release_transport(mvs_stt); -} - -module_init(mvs_init); -module_exit(mvs_exit); - -MODULE_AUTHOR("Jeff Garzik "); -MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver"); -MODULE_VERSION(DRV_VERSION); -MODULE_LICENSE("GPL"); -#ifdef CONFIG_PCI -MODULE_DEVICE_TABLE(pci, mvs_pci_table); -#endif diff --git a/trunk/drivers/scsi/mvsas/mv_sas.c b/trunk/drivers/scsi/mvsas/mv_sas.c deleted file mode 100644 index 0d2138641214..000000000000 --- a/trunk/drivers/scsi/mvsas/mv_sas.c +++ /dev/null @@ -1,2154 +0,0 @@ -/* - * Marvell 88SE64xx/88SE94xx main function - * - * Copyright 2007 Red Hat, Inc. - * Copyright 2008 Marvell. - * - * This file is licensed under GPLv2. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; version 2 of the - * License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA -*/ - -#include "mv_sas.h" - -static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag) -{ - if (task->lldd_task) { - struct mvs_slot_info *slot; - slot = task->lldd_task; - *tag = slot->slot_tag; - return 1; - } - return 0; -} - -void mvs_tag_clear(struct mvs_info *mvi, u32 tag) -{ - void *bitmap = &mvi->tags; - clear_bit(tag, bitmap); -} - -void mvs_tag_free(struct mvs_info *mvi, u32 tag) -{ - mvs_tag_clear(mvi, tag); -} - -void mvs_tag_set(struct mvs_info *mvi, unsigned int tag) -{ - void *bitmap = &mvi->tags; - set_bit(tag, bitmap); -} - -inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out) -{ - unsigned int index, tag; - void *bitmap = &mvi->tags; - - index = find_first_zero_bit(bitmap, mvi->tags_num); - tag = index; - if (tag >= mvi->tags_num) - return -SAS_QUEUE_FULL; - mvs_tag_set(mvi, tag); - *tag_out = tag; - return 0; -} - -void mvs_tag_init(struct mvs_info *mvi) -{ - int i; - for (i = 0; i < mvi->tags_num; ++i) - mvs_tag_clear(mvi, i); -} - -void mvs_hexdump(u32 size, u8 *data, u32 baseaddr) -{ - u32 i; - u32 run; - u32 offset; - - offset = 0; - while (size) { - printk(KERN_DEBUG"%08X : ", baseaddr + offset); - if (size >= 16) - run = 16; - else - run = size; - size -= run; - for (i = 0; i < 16; i++) { - if (i < run) - printk(KERN_DEBUG"%02X ", (u32)data[i]); - else - printk(KERN_DEBUG" "); - } - printk(KERN_DEBUG": "); - for (i = 0; i < run; i++) - printk(KERN_DEBUG"%c", - isalnum(data[i]) ? 
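
The tag helpers above treat `mvi->tags` as a bitmap: allocation is find-first-zero followed by set, free is clear, and a full map means the queue is saturated. A userspace sketch of the same allocator (fixed at 64 tags here; the driver sizes the search by `tags_num`):

```c
#include <stdint.h>

#define NR_TAGS 64
static uint64_t tag_map;	/* bit t set => tag t in use */

static int tag_alloc(uint32_t *out)
{
	for (uint32_t t = 0; t < NR_TAGS; t++) {
		if (!(tag_map & ((uint64_t)1 << t))) {
			tag_map |= (uint64_t)1 << t;	/* claim it */
			*out = t;
			return 0;
		}
	}
	return -1;		/* queue full */
}

static void tag_free(uint32_t t)
{
	tag_map &= ~((uint64_t)1 << t);
}
```
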
data[i] : '.'); - printk(KERN_DEBUG"\n"); - data = &data[16]; - offset += run; - } - printk(KERN_DEBUG"\n"); -} - -#if (_MV_DUMP > 1) -static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag, - enum sas_protocol proto) -{ - u32 offset; - struct mvs_slot_info *slot = &mvi->slot_info[tag]; - - offset = slot->cmd_size + MVS_OAF_SZ + - MVS_CHIP_DISP->prd_size() * slot->n_elem; - dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n", - tag); - mvs_hexdump(32, (u8 *) slot->response, - (u32) slot->buf_dma + offset); -} -#endif - -static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag, - enum sas_protocol proto) -{ -#if (_MV_DUMP > 1) - u32 sz, w_ptr; - u64 addr; - struct mvs_slot_info *slot = &mvi->slot_info[tag]; - - /*Delivery Queue */ - sz = MVS_CHIP_SLOT_SZ; - w_ptr = slot->tx; - addr = mvi->tx_dma; - dev_printk(KERN_DEBUG, mvi->dev, - "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr); - dev_printk(KERN_DEBUG, mvi->dev, - "Delivery Queue Base Address=0x%llX (PA)" - "(tx_dma=0x%llX), Entry=%04d\n", - addr, (unsigned long long)mvi->tx_dma, w_ptr); - mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]), - (u32) mvi->tx_dma + sizeof(u32) * w_ptr); - /*Command List */ - addr = mvi->slot_dma; - dev_printk(KERN_DEBUG, mvi->dev, - "Command List Base Address=0x%llX (PA)" - "(slot_dma=0x%llX), Header=%03d\n", - addr, (unsigned long long)slot->buf_dma, tag); - dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag); - /*mvs_cmd_hdr */ - mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]), - (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr)); - /*1.command table area */ - dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n"); - mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma); - /*2.open address frame area */ - dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n"); - mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size, - (u32) slot->buf_dma + slot->cmd_size); - /*3.status buffer */ - mvs_hba_sb_dump(mvi, tag, proto); - /*4.PRD table */ - dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n"); - mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem, - (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ, - (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ); -#endif -} - -static void mvs_hba_cq_dump(struct mvs_info *mvi) -{ -#if (_MV_DUMP > 2) - u64 addr; - void __iomem *regs = mvi->regs; - u32 entry = mvi->rx_cons + 1; - u32 rx_desc = le32_to_cpu(mvi->rx[entry]); - - /*Completion Queue */ - addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO); - dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n", - mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task); - dev_printk(KERN_DEBUG, mvi->dev, - "Completion List Base Address=0x%llX (PA), " - "CQ_Entry=%04d, CQ_WP=0x%08X\n", - addr, entry - 1, mvi->rx[0]); - mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc), - mvi->rx_dma + sizeof(u32) * entry); -#endif -} - -void mvs_get_sas_addr(void *buf, u32 buflen) -{ - /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/ -} - -struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) -{ - unsigned long i = 0, j = 0, hi = 0; - struct sas_ha_struct *sha = dev->port->ha; - struct mvs_info *mvi = NULL; - struct asd_sas_phy *phy; - - while (sha->sas_port[i]) { - if (sha->sas_port[i] == dev->port) { - phy = container_of(sha->sas_port[i]->phy_list.next, - struct asd_sas_phy, port_phy_el); - j = 0; - while (sha->sas_phy[j]) { - if (sha->sas_phy[j] == phy) - break; - j++; - } - break; - } - i++; - } - hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; - 
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; - - return mvi; - -} - -/* FIXME */ -int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) -{ - unsigned long i = 0, j = 0, n = 0, num = 0; - struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; - struct mvs_info *mvi = mvi_dev->mvi_info; - struct sas_ha_struct *sha = dev->port->ha; - - while (sha->sas_port[i]) { - if (sha->sas_port[i] == dev->port) { - struct asd_sas_phy *phy; - list_for_each_entry(phy, - &sha->sas_port[i]->phy_list, port_phy_el) { - j = 0; - while (sha->sas_phy[j]) { - if (sha->sas_phy[j] == phy) - break; - j++; - } - phyno[n] = (j >= mvi->chip->n_phy) ? - (j - mvi->chip->n_phy) : j; - num++; - n++; - } - break; - } - i++; - } - return num; -} - -static inline void mvs_free_reg_set(struct mvs_info *mvi, - struct mvs_device *dev) -{ - if (!dev) { - mv_printk("device has been free.\n"); - return; - } - if (dev->runing_req != 0) - return; - if (dev->taskfileset == MVS_ID_NOT_MAPPED) - return; - MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset); -} - -static inline u8 mvs_assign_reg_set(struct mvs_info *mvi, - struct mvs_device *dev) -{ - if (dev->taskfileset != MVS_ID_NOT_MAPPED) - return 0; - return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset); -} - -void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard) -{ - u32 no; - for_each_phy(phy_mask, phy_mask, no) { - if (!(phy_mask & 1)) - continue; - MVS_CHIP_DISP->phy_reset(mvi, no, hard); - } -} - -/* FIXME: locking? */ -int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, - void *funcdata) -{ - int rc = 0, phy_id = sas_phy->id; - u32 tmp, i = 0, hi; - struct sas_ha_struct *sha = sas_phy->ha; - struct mvs_info *mvi = NULL; - - while (sha->sas_phy[i]) { - if (sha->sas_phy[i] == sas_phy) - break; - i++; - } - hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; - mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; - - switch (func) { - case PHY_FUNC_SET_LINK_RATE: - MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata); - break; - - case PHY_FUNC_HARD_RESET: - tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id); - if (tmp & PHY_RST_HARD) - break; - MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1); - break; - - case PHY_FUNC_LINK_RESET: - MVS_CHIP_DISP->phy_enable(mvi, phy_id); - MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0); - break; - - case PHY_FUNC_DISABLE: - MVS_CHIP_DISP->phy_disable(mvi, phy_id); - break; - case PHY_FUNC_RELEASE_SPINUP_HOLD: - default: - rc = -EOPNOTSUPP; - } - msleep(200); - return rc; -} - -void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id, - u32 off_lo, u32 off_hi, u64 sas_addr) -{ - u32 lo = (u32)sas_addr; - u32 hi = (u32)(sas_addr>>32); - - MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo); - MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo); - MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi); - MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi); -} - -static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) -{ - struct mvs_phy *phy = &mvi->phy[i]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - struct sas_ha_struct *sas_ha; - if (!phy->phy_attached) - return; - - if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK) - && phy->phy_type & PORT_TYPE_SAS) { - return; - } - - sas_ha = mvi->sas; - sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE); - - if (sas_phy->phy) { - struct sas_phy *sphy = sas_phy->phy; - - sphy->negotiated_linkrate = sas_phy->linkrate; - sphy->minimum_linkrate = phy->minimum_linkrate; - sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; - 
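mvs_set_sas_addr above programs a 64-bit SAS address through a pair of 32-bit port-config writes, low dword then high dword, via the chip's write_port_cfg_addr/write_port_cfg_data dispatch pair. A compilable sketch of the same split, with a toy two-word register file standing in for the config window and the sample address borrowed from the commented-out bytes in mvs_get_sas_addr:

#include <stdio.h>
#include <stdint.h>

static uint32_t regfile[2];     /* toy stand-in for the cfg window */

static void set_sas_addr(uint64_t sas_addr)
{
        regfile[0] = (uint32_t)sas_addr;          /* low 32 bits  */
        regfile[1] = (uint32_t)(sas_addr >> 32);  /* high 32 bits */
}

int main(void)
{
        set_sas_addr(0x5005043011ab6440ULL);
        printf("lo=%08x hi=%08x\n",
               (unsigned)regfile[0], (unsigned)regfile[1]);
        return 0;
}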
sphy->maximum_linkrate = phy->maximum_linkrate; - sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate(); - } - - if (phy->phy_type & PORT_TYPE_SAS) { - struct sas_identify_frame *id; - - id = (struct sas_identify_frame *)phy->frame_rcvd; - id->dev_type = phy->identify.device_type; - id->initiator_bits = SAS_PROTOCOL_ALL; - id->target_bits = phy->identify.target_port_protocols; - } else if (phy->phy_type & PORT_TYPE_SATA) { - /*Nothing*/ - } - mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy); - - sas_phy->frame_rcvd_size = phy->frame_rcvd_size; - - mvi->sas->notify_port_event(sas_phy, - PORTE_BYTES_DMAED); -} - -int mvs_slave_alloc(struct scsi_device *scsi_dev) -{ - struct domain_device *dev = sdev_to_domain_dev(scsi_dev); - if (dev_is_sata(dev)) { - /* We don't need to rescan targets - * if REPORT_LUNS request is failed - */ - if (scsi_dev->lun > 0) - return -ENXIO; - scsi_dev->tagged_supported = 1; - } - - return sas_slave_alloc(scsi_dev); -} - -int mvs_slave_configure(struct scsi_device *sdev) -{ - struct domain_device *dev = sdev_to_domain_dev(sdev); - int ret = sas_slave_configure(sdev); - - if (ret) - return ret; - if (dev_is_sata(dev)) { - /* may set PIO mode */ - #if MV_DISABLE_NCQ - struct ata_port *ap = dev->sata_dev.ap; - struct ata_device *adev = ap->link.device; - adev->flags |= ATA_DFLAG_NCQ_OFF; - scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); - #endif - } - return 0; -} - -void mvs_scan_start(struct Scsi_Host *shost) -{ - int i, j; - unsigned short core_nr; - struct mvs_info *mvi; - struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); - - core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; - - for (j = 0; j < core_nr; j++) { - mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; - for (i = 0; i < mvi->chip->n_phy; ++i) - mvs_bytes_dmaed(mvi, i); - } -} - -int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time) -{ - /* give the phy enabling interrupt event time to come in (1s - * is empirically about all it takes) */ - if (time < HZ) - return 0; - /* Wait for discovery to finish */ - scsi_flush_work(shost); - return 1; -} - -static int mvs_task_prep_smp(struct mvs_info *mvi, - struct mvs_task_exec_info *tei) -{ - int elem, rc, i; - struct sas_task *task = tei->task; - struct mvs_cmd_hdr *hdr = tei->hdr; - struct domain_device *dev = task->dev; - struct asd_sas_port *sas_port = dev->port; - struct scatterlist *sg_req, *sg_resp; - u32 req_len, resp_len, tag = tei->tag; - void *buf_tmp; - u8 *buf_oaf; - dma_addr_t buf_tmp_dma; - void *buf_prd; - struct mvs_slot_info *slot = &mvi->slot_info[tag]; - u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); -#if _MV_DUMP - u8 *buf_cmd; - void *from; -#endif - /* - * DMA-map SMP request, response buffers - */ - sg_req = &task->smp_task.smp_req; - elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE); - if (!elem) - return -ENOMEM; - req_len = sg_dma_len(sg_req); - - sg_resp = &task->smp_task.smp_resp; - elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE); - if (!elem) { - rc = -ENOMEM; - goto err_out; - } - resp_len = SB_RFB_MAX; - - /* must be in dwords */ - if ((req_len & 0x3) || (resp_len & 0x3)) { - rc = -EINVAL; - goto err_out_2; - } - - /* - * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs - */ - - /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */ - buf_tmp = slot->buf; - buf_tmp_dma = slot->buf_dma; - -#if _MV_DUMP - buf_cmd = buf_tmp; - hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); - buf_tmp += req_len; - buf_tmp_dma += req_len; - slot->cmd_size = 
req_len; -#else - hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); -#endif - - /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ - buf_oaf = buf_tmp; - hdr->open_frame = cpu_to_le64(buf_tmp_dma); - - buf_tmp += MVS_OAF_SZ; - buf_tmp_dma += MVS_OAF_SZ; - - /* region 3: PRD table *********************************** */ - buf_prd = buf_tmp; - if (tei->n_elem) - hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); - else - hdr->prd_tbl = 0; - - i = MVS_CHIP_DISP->prd_size() * tei->n_elem; - buf_tmp += i; - buf_tmp_dma += i; - - /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ - slot->response = buf_tmp; - hdr->status_buf = cpu_to_le64(buf_tmp_dma); - if (mvi->flags & MVF_FLAG_SOC) - hdr->reserved[0] = 0; - - /* - * Fill in TX ring and command slot header - */ - slot->tx = mvi->tx_prod; - mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) | - TXQ_MODE_I | tag | - (sas_port->phy_mask << TXQ_PHY_SHIFT)); - - hdr->flags |= flags; - hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4)); - hdr->tags = cpu_to_le32(tag); - hdr->data_len = 0; - - /* generate open address frame hdr (first 12 bytes) */ - /* initiator, SMP, ftype 1h */ - buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01; - buf_oaf[1] = dev->linkrate & 0xf; - *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ - memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); - - /* fill in PRD (scatter/gather) table, if any */ - MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); - -#if _MV_DUMP - /* copy cmd table */ - from = kmap_atomic(sg_page(sg_req), KM_IRQ0); - memcpy(buf_cmd, from + sg_req->offset, req_len); - kunmap_atomic(from, KM_IRQ0); -#endif - return 0; - -err_out_2: - dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1, - PCI_DMA_FROMDEVICE); -err_out: - dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1, - PCI_DMA_TODEVICE); - return rc; -} - -static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag) -{ - struct ata_queued_cmd *qc = task->uldd_task; - - if (qc) { - if (qc->tf.command == ATA_CMD_FPDMA_WRITE || - qc->tf.command == ATA_CMD_FPDMA_READ) { - *tag = qc->tag; - return 1; - } - } - - return 0; -} - -static int mvs_task_prep_ata(struct mvs_info *mvi, - struct mvs_task_exec_info *tei) -{ - struct sas_task *task = tei->task; - struct domain_device *dev = task->dev; - struct mvs_device *mvi_dev = dev->lldd_dev; - struct mvs_cmd_hdr *hdr = tei->hdr; - struct asd_sas_port *sas_port = dev->port; - struct mvs_slot_info *slot; - void *buf_prd; - u32 tag = tei->tag, hdr_tag; - u32 flags, del_q; - void *buf_tmp; - u8 *buf_cmd, *buf_oaf; - dma_addr_t buf_tmp_dma; - u32 i, req_len, resp_len; - const u32 max_resp_len = SB_RFB_MAX; - - if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) { - mv_dprintk("Have not enough regiset for dev %d.\n", - mvi_dev->device_id); - return -EBUSY; - } - slot = &mvi->slot_info[tag]; - slot->tx = mvi->tx_prod; - del_q = TXQ_MODE_I | tag | - (TXQ_CMD_STP << TXQ_CMD_SHIFT) | - (sas_port->phy_mask << TXQ_PHY_SHIFT) | - (mvi_dev->taskfileset << TXQ_SRS_SHIFT); - mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); - -#ifndef DISABLE_HOTPLUG_DMA_FIX - if (task->data_dir == DMA_FROM_DEVICE) - flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT); - else - flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); -#else - flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); -#endif - if (task->ata_task.use_ncq) - flags |= MCH_FPDMA; - if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { - if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI) - flags |= MCH_ATAPI; 
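mvs_get_ncq_tag above pulls the queue tag out of the libata queued command for FPDMA reads and writes; the ATA prep path just below then shifts it into bits 7:3 of the FIS sector-count field (the "hdr_tag << 3"). A small sketch of that encoding, with an assumed 5-bit tag value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint8_t tag = 13;            /* assumed 5-bit NCQ queue tag */
        uint8_t sector_count = 0;

        /* FPDMA commands carry the tag in bits 7:3 of sector count */
        sector_count |= (uint8_t)(tag << 3);

        /* the receiver recovers it by undoing the shift */
        printf("sector_count=0x%02x decoded tag=%u\n",
               sector_count, (sector_count >> 3) & 0x1f);
        return 0;
}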
- } - - /* FIXME: fill in port multiplier number */ - - hdr->flags = cpu_to_le32(flags); - - /* FIXME: the low order order 5 bits for the TAG if enable NCQ */ - if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) - task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); - else - hdr_tag = tag; - - hdr->tags = cpu_to_le32(hdr_tag); - - hdr->data_len = cpu_to_le32(task->total_xfer_len); - - /* - * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs - */ - - /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */ - buf_cmd = buf_tmp = slot->buf; - buf_tmp_dma = slot->buf_dma; - - hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); - - buf_tmp += MVS_ATA_CMD_SZ; - buf_tmp_dma += MVS_ATA_CMD_SZ; -#if _MV_DUMP - slot->cmd_size = MVS_ATA_CMD_SZ; -#endif - - /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ - /* used for STP. unused for SATA? */ - buf_oaf = buf_tmp; - hdr->open_frame = cpu_to_le64(buf_tmp_dma); - - buf_tmp += MVS_OAF_SZ; - buf_tmp_dma += MVS_OAF_SZ; - - /* region 3: PRD table ********************************************* */ - buf_prd = buf_tmp; - - if (tei->n_elem) - hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); - else - hdr->prd_tbl = 0; - i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count(); - - buf_tmp += i; - buf_tmp_dma += i; - - /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ - /* FIXME: probably unused, for SATA. kept here just in case - * we get a STP/SATA error information record - */ - slot->response = buf_tmp; - hdr->status_buf = cpu_to_le64(buf_tmp_dma); - if (mvi->flags & MVF_FLAG_SOC) - hdr->reserved[0] = 0; - - req_len = sizeof(struct host_to_dev_fis); - resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - - sizeof(struct mvs_err_info) - i; - - /* request, response lengths */ - resp_len = min(resp_len, max_resp_len); - hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); - - if (likely(!task->ata_task.device_control_reg_update)) - task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ - /* fill in command FIS and ATAPI CDB */ - memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); - if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) - memcpy(buf_cmd + STP_ATAPI_CMD, - task->ata_task.atapi_packet, 16); - - /* generate open address frame hdr (first 12 bytes) */ - /* initiator, STP, ftype 1h */ - buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1; - buf_oaf[1] = dev->linkrate & 0xf; - *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); - memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); - - /* fill in PRD (scatter/gather) table, if any */ - MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); -#ifndef DISABLE_HOTPLUG_DMA_FIX - if (task->data_dir == DMA_FROM_DEVICE) - MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma, - TRASH_BUCKET_SIZE, tei->n_elem, buf_prd); -#endif - return 0; -} - -static int mvs_task_prep_ssp(struct mvs_info *mvi, - struct mvs_task_exec_info *tei, int is_tmf, - struct mvs_tmf_task *tmf) -{ - struct sas_task *task = tei->task; - struct mvs_cmd_hdr *hdr = tei->hdr; - struct mvs_port *port = tei->port; - struct domain_device *dev = task->dev; - struct mvs_device *mvi_dev = dev->lldd_dev; - struct asd_sas_port *sas_port = dev->port; - struct mvs_slot_info *slot; - void *buf_prd; - struct ssp_frame_hdr *ssp_hdr; - void *buf_tmp; - u8 *buf_cmd, *buf_oaf, fburst = 0; - dma_addr_t buf_tmp_dma; - u32 flags; - u32 resp_len, req_len, i, tag = tei->tag; - const u32 max_resp_len = SB_RFB_MAX; - u32 phy_mask; - - slot = &mvi->slot_info[tag]; - - 
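All three prep paths (SMP above, ATA here, SSP next) carve one MVS_SLOT_BUF_SZ DMA buffer into the same four regions by walking a cursor: command table, open address frame, PRD table, and whatever remains for the status buffer, which is why the comments note "larger the PRD, smaller this buf". A sketch of that carving with illustrative sizes (CMD_SZ/OAF_SZ/PRD_SZ are placeholders, not the driver's constants):

#include <stdio.h>
#include <stdint.h>

#define CMD_SZ  64              /* illustrative region sizes only */
#define OAF_SZ  64
#define PRD_SZ  16

int main(void)
{
        uint64_t cursor = 0x10000000ULL;   /* assumed slot bus address */
        unsigned n_elem = 4;               /* scatter/gather entries  */

        uint64_t cmd_tbl = cursor;  cursor += CMD_SZ;           /* region 1 */
        uint64_t oaf     = cursor;  cursor += OAF_SZ;           /* region 2 */
        uint64_t prd_tbl = cursor;  cursor += PRD_SZ * n_elem;  /* region 3 */
        uint64_t status  = cursor;                              /* region 4 */

        printf("cmd=%#llx oaf=%#llx prd=%#llx status=%#llx\n",
               (unsigned long long)cmd_tbl, (unsigned long long)oaf,
               (unsigned long long)prd_tbl, (unsigned long long)status);
        return 0;
}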
phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap : - sas_port->phy_mask) & TXQ_PHY_MASK; - - slot->tx = mvi->tx_prod; - mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | - (TXQ_CMD_SSP << TXQ_CMD_SHIFT) | - (phy_mask << TXQ_PHY_SHIFT)); - - flags = MCH_RETRY; - if (task->ssp_task.enable_first_burst) { - flags |= MCH_FBURST; - fburst = (1 << 7); - } - if (is_tmf) - flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT); - else - flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT); - hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT)); - hdr->tags = cpu_to_le32(tag); - hdr->data_len = cpu_to_le32(task->total_xfer_len); - - /* - * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs - */ - - /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ - buf_cmd = buf_tmp = slot->buf; - buf_tmp_dma = slot->buf_dma; - - hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); - - buf_tmp += MVS_SSP_CMD_SZ; - buf_tmp_dma += MVS_SSP_CMD_SZ; -#if _MV_DUMP - slot->cmd_size = MVS_SSP_CMD_SZ; -#endif - - /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */ - buf_oaf = buf_tmp; - hdr->open_frame = cpu_to_le64(buf_tmp_dma); - - buf_tmp += MVS_OAF_SZ; - buf_tmp_dma += MVS_OAF_SZ; - - /* region 3: PRD table ********************************************* */ - buf_prd = buf_tmp; - if (tei->n_elem) - hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); - else - hdr->prd_tbl = 0; - - i = MVS_CHIP_DISP->prd_size() * tei->n_elem; - buf_tmp += i; - buf_tmp_dma += i; - - /* region 4: status buffer (larger the PRD, smaller this buf) ****** */ - slot->response = buf_tmp; - hdr->status_buf = cpu_to_le64(buf_tmp_dma); - if (mvi->flags & MVF_FLAG_SOC) - hdr->reserved[0] = 0; - - resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - - sizeof(struct mvs_err_info) - i; - resp_len = min(resp_len, max_resp_len); - - req_len = sizeof(struct ssp_frame_hdr) + 28; - - /* request, response lengths */ - hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); - - /* generate open address frame hdr (first 12 bytes) */ - /* initiator, SSP, ftype 1h */ - buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1; - buf_oaf[1] = dev->linkrate & 0xf; - *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); - memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); - - /* fill in SSP frame header (Command Table.SSP frame header) */ - ssp_hdr = (struct ssp_frame_hdr *)buf_cmd; - - if (is_tmf) - ssp_hdr->frame_type = SSP_TASK; - else - ssp_hdr->frame_type = SSP_COMMAND; - - memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr, - HASHED_SAS_ADDR_SIZE); - memcpy(ssp_hdr->hashed_src_addr, - dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); - ssp_hdr->tag = cpu_to_be16(tag); - - /* fill in IU for TASK and Command Frame */ - buf_cmd += sizeof(*ssp_hdr); - memcpy(buf_cmd, &task->ssp_task.LUN, 8); - - if (ssp_hdr->frame_type != SSP_TASK) { - buf_cmd[9] = fburst | task->ssp_task.task_attr | - (task->ssp_task.task_prio << 3); - memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16); - } else{ - buf_cmd[10] = tmf->tmf; - switch (tmf->tmf) { - case TMF_ABORT_TASK: - case TMF_QUERY_TASK: - buf_cmd[12] = - (tmf->tag_of_task_to_be_managed >> 8) & 0xff; - buf_cmd[13] = - tmf->tag_of_task_to_be_managed & 0xff; - break; - default: - break; - } - } - /* fill in PRD (scatter/gather) table, if any */ - MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); - return 0; -} - -#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) -static int mvs_task_exec(struct sas_task *task, const int num, 
gfp_t gfp_flags, - struct completion *completion,int is_tmf, - struct mvs_tmf_task *tmf) -{ - struct domain_device *dev = task->dev; - struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; - struct mvs_info *mvi = mvi_dev->mvi_info; - struct mvs_task_exec_info tei; - struct sas_task *t = task; - struct mvs_slot_info *slot; - u32 tag = 0xdeadbeef, rc, n_elem = 0; - u32 n = num, pass = 0; - unsigned long flags = 0; - - if (!dev->port) { - struct task_status_struct *tsm = &t->task_status; - - tsm->resp = SAS_TASK_UNDELIVERED; - tsm->stat = SAS_PHY_DOWN; - t->task_done(t); - return 0; - } - - spin_lock_irqsave(&mvi->lock, flags); - do { - dev = t->dev; - mvi_dev = dev->lldd_dev; - if (DEV_IS_GONE(mvi_dev)) { - if (mvi_dev) - mv_dprintk("device %d not ready.\n", - mvi_dev->device_id); - else - mv_dprintk("device %016llx not ready.\n", - SAS_ADDR(dev->sas_addr)); - - rc = SAS_PHY_DOWN; - goto out_done; - } - - if (dev->port->id >= mvi->chip->n_phy) - tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy]; - else - tei.port = &mvi->port[dev->port->id]; - - if (!tei.port->port_attached) { - if (sas_protocol_ata(t->task_proto)) { - mv_dprintk("port %d does not" - "attached device.\n", dev->port->id); - rc = SAS_PHY_DOWN; - goto out_done; - } else { - struct task_status_struct *ts = &t->task_status; - ts->resp = SAS_TASK_UNDELIVERED; - ts->stat = SAS_PHY_DOWN; - t->task_done(t); - if (n > 1) - t = list_entry(t->list.next, - struct sas_task, list); - continue; - } - } - - if (!sas_protocol_ata(t->task_proto)) { - if (t->num_scatter) { - n_elem = dma_map_sg(mvi->dev, - t->scatter, - t->num_scatter, - t->data_dir); - if (!n_elem) { - rc = -ENOMEM; - goto err_out; - } - } - } else { - n_elem = t->num_scatter; - } - - rc = mvs_tag_alloc(mvi, &tag); - if (rc) - goto err_out; - - slot = &mvi->slot_info[tag]; - - - t->lldd_task = NULL; - slot->n_elem = n_elem; - slot->slot_tag = tag; - memset(slot->buf, 0, MVS_SLOT_BUF_SZ); - - tei.task = t; - tei.hdr = &mvi->slot[tag]; - tei.tag = tag; - tei.n_elem = n_elem; - switch (t->task_proto) { - case SAS_PROTOCOL_SMP: - rc = mvs_task_prep_smp(mvi, &tei); - break; - case SAS_PROTOCOL_SSP: - rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf); - break; - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: - rc = mvs_task_prep_ata(mvi, &tei); - break; - default: - dev_printk(KERN_ERR, mvi->dev, - "unknown sas_task proto: 0x%x\n", - t->task_proto); - rc = -EINVAL; - break; - } - - if (rc) { - mv_dprintk("rc is %x\n", rc); - goto err_out_tag; - } - slot->task = t; - slot->port = tei.port; - t->lldd_task = slot; - list_add_tail(&slot->entry, &tei.port->list); - /* TODO: select normal or high priority */ - spin_lock(&t->task_state_lock); - t->task_state_flags |= SAS_TASK_AT_INITIATOR; - spin_unlock(&t->task_state_lock); - - mvs_hba_memory_dump(mvi, tag, t->task_proto); - mvi_dev->runing_req++; - ++pass; - mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); - if (n > 1) - t = list_entry(t->list.next, struct sas_task, list); - } while (--n); - rc = 0; - goto out_done; - -err_out_tag: - mvs_tag_free(mvi, tag); -err_out: - - dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc); - if (!sas_protocol_ata(t->task_proto)) - if (n_elem) - dma_unmap_sg(mvi->dev, t->scatter, n_elem, - t->data_dir); -out_done: - if (likely(pass)) { - MVS_CHIP_DISP->start_delivery(mvi, - (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1)); - } - spin_unlock_irqrestore(&mvi->lock, flags); - return rc; -} - -int mvs_queue_command(struct sas_task 
*task, const int num, - gfp_t gfp_flags) -{ - return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL); -} - -static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc) -{ - u32 slot_idx = rx_desc & RXQ_SLOT_MASK; - mvs_tag_clear(mvi, slot_idx); -} - -static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task, - struct mvs_slot_info *slot, u32 slot_idx) -{ - if (!slot->task) - return; - if (!sas_protocol_ata(task->task_proto)) - if (slot->n_elem) - dma_unmap_sg(mvi->dev, task->scatter, - slot->n_elem, task->data_dir); - - switch (task->task_proto) { - case SAS_PROTOCOL_SMP: - dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1, - PCI_DMA_FROMDEVICE); - dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1, - PCI_DMA_TODEVICE); - break; - - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SSP: - default: - /* do nothing */ - break; - } - list_del_init(&slot->entry); - task->lldd_task = NULL; - slot->task = NULL; - slot->port = NULL; - slot->slot_tag = 0xFFFFFFFF; - mvs_slot_free(mvi, slot_idx); -} - -static void mvs_update_wideport(struct mvs_info *mvi, int i) -{ - struct mvs_phy *phy = &mvi->phy[i]; - struct mvs_port *port = phy->port; - int j, no; - - for_each_phy(port->wide_port_phymap, j, no) { - if (j & 1) { - MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, - PHYR_WIDE_PORT); - MVS_CHIP_DISP->write_port_cfg_data(mvi, no, - port->wide_port_phymap); - } else { - MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, - PHYR_WIDE_PORT); - MVS_CHIP_DISP->write_port_cfg_data(mvi, no, - 0); - } - } -} - -static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i) -{ - u32 tmp; - struct mvs_phy *phy = &mvi->phy[i]; - struct mvs_port *port = phy->port; - - tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i); - if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { - if (!port) - phy->phy_attached = 1; - return tmp; - } - - if (port) { - if (phy->phy_type & PORT_TYPE_SAS) { - port->wide_port_phymap &= ~(1U << i); - if (!port->wide_port_phymap) - port->port_attached = 0; - mvs_update_wideport(mvi, i); - } else if (phy->phy_type & PORT_TYPE_SATA) - port->port_attached = 0; - phy->port = NULL; - phy->phy_attached = 0; - phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); - } - return 0; -} - -static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf) -{ - u32 *s = (u32 *) buf; - - if (!s) - return NULL; - - MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); - s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); - - MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); - s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); - - MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); - s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); - - MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); - s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i); - - /* Workaround: take some ATAPI devices for ATA */ - if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01)) - s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10); - - return s; -} - -static u32 mvs_is_sig_fis_received(u32 irq_status) -{ - return irq_status & PHYEV_SIG_FIS; -} - -void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) -{ - struct mvs_phy *phy = &mvi->phy[i]; - struct sas_identify_frame *id; - - id = (struct sas_identify_frame *)phy->frame_rcvd; - - if (get_st) { - phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i); - phy->phy_status = mvs_is_phy_ready(mvi, i); - } - - if (phy->phy_status) { - int oob_done = 0; - struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy; - - 
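mvs_update_wideport above walks the wide-port phy bitmap one bit at a time and programs PHYR_WIDE_PORT with the full map for member phys and with zero otherwise; the shifted-mask walk is the same one the for_each_phy() macro in mv_sas.h expands to. A standalone sketch of the idiom, with an assumed bitmap:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t phymap = 0x0b;   /* assumed members: phys 0, 1 and 3 */
        uint32_t mc;
        int no;

        /* shift a working copy right and test bit 0, as the
         * driver's for_each_phy(mask, mc, no) loop does */
        for (mc = phymap, no = 0; mc != 0; mc >>= 1, no++) {
                if (mc & 1)
                        printf("phy %d: program wide-port map 0x%x\n",
                               no, (unsigned)phymap);
                else
                        printf("phy %d: clear wide-port register\n", no);
        }
        return 0;
}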
oob_done = MVS_CHIP_DISP->oob_done(mvi, i); - - MVS_CHIP_DISP->fix_phy_info(mvi, i, id); - if (phy->phy_type & PORT_TYPE_SATA) { - phy->identify.target_port_protocols = SAS_PROTOCOL_STP; - if (mvs_is_sig_fis_received(phy->irq_status)) { - phy->phy_attached = 1; - phy->att_dev_sas_addr = - i + mvi->id * mvi->chip->n_phy; - if (oob_done) - sas_phy->oob_mode = SATA_OOB_MODE; - phy->frame_rcvd_size = - sizeof(struct dev_to_host_fis); - mvs_get_d2h_reg(mvi, i, id); - } else { - u32 tmp; - dev_printk(KERN_DEBUG, mvi->dev, - "Phy%d : No sig fis\n", i); - tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i); - MVS_CHIP_DISP->write_port_irq_mask(mvi, i, - tmp | PHYEV_SIG_FIS); - phy->phy_attached = 0; - phy->phy_type &= ~PORT_TYPE_SATA; - MVS_CHIP_DISP->phy_reset(mvi, i, 0); - goto out_done; - } - } else if (phy->phy_type & PORT_TYPE_SAS - || phy->att_dev_info & PORT_SSP_INIT_MASK) { - phy->phy_attached = 1; - phy->identify.device_type = - phy->att_dev_info & PORT_DEV_TYPE_MASK; - - if (phy->identify.device_type == SAS_END_DEV) - phy->identify.target_port_protocols = - SAS_PROTOCOL_SSP; - else if (phy->identify.device_type != NO_DEVICE) - phy->identify.target_port_protocols = - SAS_PROTOCOL_SMP; - if (oob_done) - sas_phy->oob_mode = SAS_OOB_MODE; - phy->frame_rcvd_size = - sizeof(struct sas_identify_frame); - } - memcpy(sas_phy->attached_sas_addr, - &phy->att_dev_sas_addr, SAS_ADDR_SIZE); - - if (MVS_CHIP_DISP->phy_work_around) - MVS_CHIP_DISP->phy_work_around(mvi, i); - } - mv_dprintk("port %d attach dev info is %x\n", - i + mvi->id * mvi->chip->n_phy, phy->att_dev_info); - mv_dprintk("port %d attach sas addr is %llx\n", - i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr); -out_done: - if (get_st) - MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status); -} - -static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock) -{ - struct sas_ha_struct *sas_ha = sas_phy->ha; - struct mvs_info *mvi = NULL; int i = 0, hi; - struct mvs_phy *phy = sas_phy->lldd_phy; - struct asd_sas_port *sas_port = sas_phy->port; - struct mvs_port *port; - unsigned long flags = 0; - if (!sas_port) - return; - - while (sas_ha->sas_phy[i]) { - if (sas_ha->sas_phy[i] == sas_phy) - break; - i++; - } - hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy; - mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi]; - if (sas_port->id >= mvi->chip->n_phy) - port = &mvi->port[sas_port->id - mvi->chip->n_phy]; - else - port = &mvi->port[sas_port->id]; - if (lock) - spin_lock_irqsave(&mvi->lock, flags); - port->port_attached = 1; - phy->port = port; - if (phy->phy_type & PORT_TYPE_SAS) { - port->wide_port_phymap = sas_port->phy_mask; - mv_printk("set wide port phy map %x\n", sas_port->phy_mask); - mvs_update_wideport(mvi, sas_phy->id); - } - if (lock) - spin_unlock_irqrestore(&mvi->lock, flags); -} - -static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock) -{ - /*Nothing*/ -} - - -void mvs_port_formed(struct asd_sas_phy *sas_phy) -{ - mvs_port_notify_formed(sas_phy, 1); -} - -void mvs_port_deformed(struct asd_sas_phy *sas_phy) -{ - mvs_port_notify_deformed(sas_phy, 1); -} - -struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi) -{ - u32 dev; - for (dev = 0; dev < MVS_MAX_DEVICES; dev++) { - if (mvi->devices[dev].dev_type == NO_DEVICE) { - mvi->devices[dev].device_id = dev; - return &mvi->devices[dev]; - } - } - - if (dev == MVS_MAX_DEVICES) - mv_printk("max support %d devices, ignore ..\n", - MVS_MAX_DEVICES); - - return NULL; -} - -void mvs_free_dev(struct mvs_device *mvi_dev) -{ - u32 id = 
mvi_dev->device_id; - memset(mvi_dev, 0, sizeof(*mvi_dev)); - mvi_dev->device_id = id; - mvi_dev->dev_type = NO_DEVICE; - mvi_dev->dev_status = MVS_DEV_NORMAL; - mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; -} - -int mvs_dev_found_notify(struct domain_device *dev, int lock) -{ - unsigned long flags = 0; - int res = 0; - struct mvs_info *mvi = NULL; - struct domain_device *parent_dev = dev->parent; - struct mvs_device *mvi_device; - - mvi = mvs_find_dev_mvi(dev); - - if (lock) - spin_lock_irqsave(&mvi->lock, flags); - - mvi_device = mvs_alloc_dev(mvi); - if (!mvi_device) { - res = -1; - goto found_out; - } - dev->lldd_dev = mvi_device; - mvi_device->dev_type = dev->dev_type; - mvi_device->mvi_info = mvi; - if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) { - int phy_id; - u8 phy_num = parent_dev->ex_dev.num_phys; - struct ex_phy *phy; - for (phy_id = 0; phy_id < phy_num; phy_id++) { - phy = &parent_dev->ex_dev.ex_phy[phy_id]; - if (SAS_ADDR(phy->attached_sas_addr) == - SAS_ADDR(dev->sas_addr)) { - mvi_device->attached_phy = phy_id; - break; - } - } - - if (phy_id == phy_num) { - mv_printk("Error: no attached dev:%016llx" - "at ex:%016llx.\n", - SAS_ADDR(dev->sas_addr), - SAS_ADDR(parent_dev->sas_addr)); - res = -1; - } - } - -found_out: - if (lock) - spin_unlock_irqrestore(&mvi->lock, flags); - return res; -} - -int mvs_dev_found(struct domain_device *dev) -{ - return mvs_dev_found_notify(dev, 1); -} - -void mvs_dev_gone_notify(struct domain_device *dev, int lock) -{ - unsigned long flags = 0; - struct mvs_device *mvi_dev = dev->lldd_dev; - struct mvs_info *mvi = mvi_dev->mvi_info; - - if (lock) - spin_lock_irqsave(&mvi->lock, flags); - - if (mvi_dev) { - mv_dprintk("found dev[%d:%x] is gone.\n", - mvi_dev->device_id, mvi_dev->dev_type); - mvs_free_reg_set(mvi, mvi_dev); - mvs_free_dev(mvi_dev); - } else { - mv_dprintk("found dev has gone.\n"); - } - dev->lldd_dev = NULL; - - if (lock) - spin_unlock_irqrestore(&mvi->lock, flags); -} - - -void mvs_dev_gone(struct domain_device *dev) -{ - mvs_dev_gone_notify(dev, 1); -} - -static struct sas_task *mvs_alloc_task(void) -{ - struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL); - - if (task) { - INIT_LIST_HEAD(&task->list); - spin_lock_init(&task->task_state_lock); - task->task_state_flags = SAS_TASK_STATE_PENDING; - init_timer(&task->timer); - init_completion(&task->completion); - } - return task; -} - -static void mvs_free_task(struct sas_task *task) -{ - if (task) { - BUG_ON(!list_empty(&task->list)); - kfree(task); - } -} - -static void mvs_task_done(struct sas_task *task) -{ - if (!del_timer(&task->timer)) - return; - complete(&task->completion); -} - -static void mvs_tmf_timedout(unsigned long data) -{ - struct sas_task *task = (struct sas_task *)data; - - task->task_state_flags |= SAS_TASK_STATE_ABORTED; - complete(&task->completion); -} - -/* XXX */ -#define MVS_TASK_TIMEOUT 20 -static int mvs_exec_internal_tmf_task(struct domain_device *dev, - void *parameter, u32 para_len, struct mvs_tmf_task *tmf) -{ - int res, retry; - struct sas_task *task = NULL; - - for (retry = 0; retry < 3; retry++) { - task = mvs_alloc_task(); - if (!task) - return -ENOMEM; - - task->dev = dev; - task->task_proto = dev->tproto; - - memcpy(&task->ssp_task, parameter, para_len); - task->task_done = mvs_task_done; - - task->timer.data = (unsigned long) task; - task->timer.function = mvs_tmf_timedout; - task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; - add_timer(&task->timer); - - res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf); - - 
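mvs_exec_internal_tmf_task runs a TMF under a watchdog timer and then, in the branch ladder that follows, classifies the libsas response/status pair: good status completes the function, a data underrun still counts as success and returns the residual, an overrun maps to -EMSGSIZE, and anything else frees the task and retries. A condensed sketch of that decision table, with stand-in enums instead of the SAM/SAS status codes:

#include <stdio.h>

enum resp { TASK_COMPLETE, TASK_OTHER };
enum stat { STAT_GOOD, STAT_UNDERRUN, STAT_OVERRUN, STAT_OTHER };

/* >= 0 means success (residual bytes for an underrun), < 0 retry/fail */
static int classify(enum resp resp, enum stat stat, int residual)
{
        if (resp != TASK_COMPLETE)
                return -1;
        switch (stat) {
        case STAT_GOOD:     return 0;
        case STAT_UNDERRUN: return residual;  /* short but successful */
        case STAT_OVERRUN:  return -2;        /* -EMSGSIZE in the driver */
        default:            return -1;        /* retry with a fresh task */
        }
}

int main(void)
{
        printf("underrun -> %d\n",
               classify(TASK_COMPLETE, STAT_UNDERRUN, 512));
        return 0;
}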
if (res) { - del_timer(&task->timer); - mv_printk("executing internel task failed:%d\n", res); - goto ex_err; - } - - wait_for_completion(&task->completion); - res = -TMF_RESP_FUNC_FAILED; - /* Even TMF timed out, return direct. */ - if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { - if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { - mv_printk("TMF task[%x] timeout.\n", tmf->tmf); - goto ex_err; - } - } - - if (task->task_status.resp == SAS_TASK_COMPLETE && - task->task_status.stat == SAM_GOOD) { - res = TMF_RESP_FUNC_COMPLETE; - break; - } - - if (task->task_status.resp == SAS_TASK_COMPLETE && - task->task_status.stat == SAS_DATA_UNDERRUN) { - /* no error, but return the number of bytes of - * underrun */ - res = task->task_status.residual; - break; - } - - if (task->task_status.resp == SAS_TASK_COMPLETE && - task->task_status.stat == SAS_DATA_OVERRUN) { - mv_dprintk("blocked task error.\n"); - res = -EMSGSIZE; - break; - } else { - mv_dprintk(" task to dev %016llx response: 0x%x " - "status 0x%x\n", - SAS_ADDR(dev->sas_addr), - task->task_status.resp, - task->task_status.stat); - mvs_free_task(task); - task = NULL; - - } - } -ex_err: - BUG_ON(retry == 3 && task != NULL); - if (task != NULL) - mvs_free_task(task); - return res; -} - -static int mvs_debug_issue_ssp_tmf(struct domain_device *dev, - u8 *lun, struct mvs_tmf_task *tmf) -{ - struct sas_ssp_task ssp_task; - DECLARE_COMPLETION_ONSTACK(completion); - if (!(dev->tproto & SAS_PROTOCOL_SSP)) - return TMF_RESP_FUNC_ESUPP; - - strncpy((u8 *)&ssp_task.LUN, lun, 8); - - return mvs_exec_internal_tmf_task(dev, &ssp_task, - sizeof(ssp_task), tmf); -} - - -/* Standard mandates link reset for ATA (type 0) - and hard reset for SSP (type 1) , only for RECOVERY */ -static int mvs_debug_I_T_nexus_reset(struct domain_device *dev) -{ - int rc; - struct sas_phy *phy = sas_find_local_phy(dev); - int reset_type = (dev->dev_type == SATA_DEV || - (dev->tproto & SAS_PROTOCOL_STP)) ? 
0 : 1; - rc = sas_phy_reset(phy, reset_type); - msleep(2000); - return rc; -} - -/* mandatory SAM-3 */ -int mvs_lu_reset(struct domain_device *dev, u8 *lun) -{ - unsigned long flags; - int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; - struct mvs_tmf_task tmf_task; - struct mvs_device * mvi_dev = dev->lldd_dev; - struct mvs_info *mvi = mvi_dev->mvi_info; - - tmf_task.tmf = TMF_LU_RESET; - mvi_dev->dev_status = MVS_DEV_EH; - rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); - if (rc == TMF_RESP_FUNC_COMPLETE) { - num = mvs_find_dev_phyno(dev, phyno); - spin_lock_irqsave(&mvi->lock, flags); - for (i = 0; i < num; i++) - mvs_release_task(mvi, phyno[i], dev); - spin_unlock_irqrestore(&mvi->lock, flags); - } - /* If failed, fall-through I_T_Nexus reset */ - mv_printk("%s for device[%x]:rc= %d\n", __func__, - mvi_dev->device_id, rc); - return rc; -} - -int mvs_I_T_nexus_reset(struct domain_device *dev) -{ - unsigned long flags; - int i, phyno[WIDE_PORT_MAX_PHY], num , rc = TMF_RESP_FUNC_FAILED; - struct mvs_device * mvi_dev = (struct mvs_device *)dev->lldd_dev; - struct mvs_info *mvi = mvi_dev->mvi_info; - - if (mvi_dev->dev_status != MVS_DEV_EH) - return TMF_RESP_FUNC_COMPLETE; - rc = mvs_debug_I_T_nexus_reset(dev); - mv_printk("%s for device[%x]:rc= %d\n", - __func__, mvi_dev->device_id, rc); - - /* housekeeper */ - num = mvs_find_dev_phyno(dev, phyno); - spin_lock_irqsave(&mvi->lock, flags); - for (i = 0; i < num; i++) - mvs_release_task(mvi, phyno[i], dev); - spin_unlock_irqrestore(&mvi->lock, flags); - - return rc; -} -/* optional SAM-3 */ -int mvs_query_task(struct sas_task *task) -{ - u32 tag; - struct scsi_lun lun; - struct mvs_tmf_task tmf_task; - int rc = TMF_RESP_FUNC_FAILED; - - if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { - struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; - struct domain_device *dev = task->dev; - struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; - struct mvs_info *mvi = mvi_dev->mvi_info; - - int_to_scsilun(cmnd->device->lun, &lun); - rc = mvs_find_tag(mvi, task, &tag); - if (rc == 0) { - rc = TMF_RESP_FUNC_FAILED; - return rc; - } - - tmf_task.tmf = TMF_QUERY_TASK; - tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); - - rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); - switch (rc) { - /* The task is still in Lun, release it then */ - case TMF_RESP_FUNC_SUCC: - /* The task is not in Lun or failed, reset the phy */ - case TMF_RESP_FUNC_FAILED: - case TMF_RESP_FUNC_COMPLETE: - break; - } - } - mv_printk("%s:rc= %d\n", __func__, rc); - return rc; -} - -/* mandatory SAM-3, still need free task/slot info */ -int mvs_abort_task(struct sas_task *task) -{ - struct scsi_lun lun; - struct mvs_tmf_task tmf_task; - struct domain_device *dev = task->dev; - struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; - struct mvs_info *mvi = mvi_dev->mvi_info; - int rc = TMF_RESP_FUNC_FAILED; - unsigned long flags; - u32 tag; - - if (mvi->exp_req) - mvi->exp_req--; - spin_lock_irqsave(&task->task_state_lock, flags); - if (task->task_state_flags & SAS_TASK_STATE_DONE) { - spin_unlock_irqrestore(&task->task_state_lock, flags); - rc = TMF_RESP_FUNC_COMPLETE; - goto out; - } - spin_unlock_irqrestore(&task->task_state_lock, flags); - if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { - struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task; - - int_to_scsilun(cmnd->device->lun, &lun); - rc = mvs_find_tag(mvi, task, &tag); - if (rc == 0) { - mv_printk("No such tag in %s\n", 
__func__); - rc = TMF_RESP_FUNC_FAILED; - return rc; - } - - tmf_task.tmf = TMF_ABORT_TASK; - tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); - - rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task); - - /* if successful, clear the task and callback forwards.*/ - if (rc == TMF_RESP_FUNC_COMPLETE) { - u32 slot_no; - struct mvs_slot_info *slot; - - if (task->lldd_task) { - slot = task->lldd_task; - slot_no = (u32) (slot - mvi->slot_info); - mvs_slot_complete(mvi, slot_no, 1); - } - } - } else if (task->task_proto & SAS_PROTOCOL_SATA || - task->task_proto & SAS_PROTOCOL_STP) { - /* to do free register_set */ - } else { - /* SMP */ - - } -out: - if (rc != TMF_RESP_FUNC_COMPLETE) - mv_printk("%s:rc= %d\n", __func__, rc); - return rc; -} - -int mvs_abort_task_set(struct domain_device *dev, u8 *lun) -{ - int rc = TMF_RESP_FUNC_FAILED; - struct mvs_tmf_task tmf_task; - - tmf_task.tmf = TMF_ABORT_TASK_SET; - rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); - - return rc; -} - -int mvs_clear_aca(struct domain_device *dev, u8 *lun) -{ - int rc = TMF_RESP_FUNC_FAILED; - struct mvs_tmf_task tmf_task; - - tmf_task.tmf = TMF_CLEAR_ACA; - rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); - - return rc; -} - -int mvs_clear_task_set(struct domain_device *dev, u8 *lun) -{ - int rc = TMF_RESP_FUNC_FAILED; - struct mvs_tmf_task tmf_task; - - tmf_task.tmf = TMF_CLEAR_TASK_SET; - rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task); - - return rc; -} - -static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task, - u32 slot_idx, int err) -{ - struct mvs_device *mvi_dev = task->dev->lldd_dev; - struct task_status_struct *tstat = &task->task_status; - struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; - int stat = SAM_GOOD; - - - resp->frame_len = sizeof(struct dev_to_host_fis); - memcpy(&resp->ending_fis[0], - SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset), - sizeof(struct dev_to_host_fis)); - tstat->buf_valid_size = sizeof(*resp); - if (unlikely(err)) - stat = SAS_PROTO_RESPONSE; - return stat; -} - -static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task, - u32 slot_idx) -{ - struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; - int stat; - u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response)); - u32 tfs = 0; - enum mvs_port_type type = PORT_TYPE_SAS; - - if (err_dw0 & CMD_ISS_STPD) - MVS_CHIP_DISP->issue_stop(mvi, type, tfs); - - MVS_CHIP_DISP->command_active(mvi, slot_idx); - - stat = SAM_CHECK_COND; - switch (task->task_proto) { - case SAS_PROTOCOL_SSP: - stat = SAS_ABORTED_TASK; - break; - case SAS_PROTOCOL_SMP: - stat = SAM_CHECK_COND; - break; - - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: - { - if (err_dw0 == 0x80400002) - mv_printk("find reserved error, why?\n"); - - task->ata_task.use_ncq = 0; - stat = SAS_PROTO_RESPONSE; - mvs_sata_done(mvi, task, slot_idx, 1); - - } - break; - default: - break; - } - - return stat; -} - -int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags) -{ - u32 slot_idx = rx_desc & RXQ_SLOT_MASK; - struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; - struct sas_task *task = slot->task; - struct mvs_device *mvi_dev = NULL; - struct task_status_struct *tstat; - - bool aborted; - void *to; - enum exec_status sts; - - if (mvi->exp_req) - mvi->exp_req--; - if (unlikely(!task || !task->lldd_task)) - return -1; - - tstat = &task->task_status; - mvi_dev = task->dev->lldd_dev; - - mvs_hba_cq_dump(mvi); - - spin_lock(&task->task_state_lock); - task->task_state_flags &= - 
~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); - task->task_state_flags |= SAS_TASK_STATE_DONE; - /* race condition*/ - aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; - spin_unlock(&task->task_state_lock); - - memset(tstat, 0, sizeof(*tstat)); - tstat->resp = SAS_TASK_COMPLETE; - - if (unlikely(aborted)) { - tstat->stat = SAS_ABORTED_TASK; - if (mvi_dev) - mvi_dev->runing_req--; - if (sas_protocol_ata(task->task_proto)) - mvs_free_reg_set(mvi, mvi_dev); - - mvs_slot_task_free(mvi, task, slot, slot_idx); - return -1; - } - - if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) { - mv_dprintk("port has not device.\n"); - tstat->stat = SAS_PHY_DOWN; - goto out; - } - - /* - if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) { - mv_dprintk("Find device[%016llx] RXQ_ERR %X, - err info:%016llx\n", - SAS_ADDR(task->dev->sas_addr), - rx_desc, (u64)(*(u64 *) slot->response)); - } - */ - - /* error info record present */ - if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) { - tstat->stat = mvs_slot_err(mvi, task, slot_idx); - goto out; - } - - switch (task->task_proto) { - case SAS_PROTOCOL_SSP: - /* hw says status == 0, datapres == 0 */ - if (rx_desc & RXQ_GOOD) { - tstat->stat = SAM_GOOD; - tstat->resp = SAS_TASK_COMPLETE; - } - /* response frame present */ - else if (rx_desc & RXQ_RSP) { - struct ssp_response_iu *iu = slot->response + - sizeof(struct mvs_err_info); - sas_ssp_task_response(mvi->dev, task, iu); - } else - tstat->stat = SAM_CHECK_COND; - break; - - case SAS_PROTOCOL_SMP: { - struct scatterlist *sg_resp = &task->smp_task.smp_resp; - tstat->stat = SAM_GOOD; - to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); - memcpy(to + sg_resp->offset, - slot->response + sizeof(struct mvs_err_info), - sg_dma_len(sg_resp)); - kunmap_atomic(to, KM_IRQ0); - break; - } - - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: { - tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); - break; - } - - default: - tstat->stat = SAM_CHECK_COND; - break; - } - -out: - if (mvi_dev) { - mvi_dev->runing_req--; - if (sas_protocol_ata(task->task_proto)) - mvs_free_reg_set(mvi, mvi_dev); - } - mvs_slot_task_free(mvi, task, slot, slot_idx); - sts = tstat->stat; - - spin_unlock(&mvi->lock); - if (task->task_done) - task->task_done(task); - else - mv_dprintk("why has not task_done.\n"); - spin_lock(&mvi->lock); - - return sts; -} - -void mvs_release_task(struct mvs_info *mvi, - int phy_no, struct domain_device *dev) -{ - int i = 0; u32 slot_idx; - struct mvs_phy *phy; - struct mvs_port *port; - struct mvs_slot_info *slot, *slot2; - - phy = &mvi->phy[phy_no]; - port = phy->port; - if (!port) - return; - - list_for_each_entry_safe(slot, slot2, &port->list, entry) { - struct sas_task *task; - slot_idx = (u32) (slot - mvi->slot_info); - task = slot->task; - - if (dev && task->dev != dev) - continue; - - mv_printk("Release slot [%x] tag[%x], task [%p]:\n", - slot_idx, slot->slot_tag, task); - - if (task->task_proto & SAS_PROTOCOL_SSP) { - mv_printk("attached with SSP task CDB["); - for (i = 0; i < 16; i++) - mv_printk(" %02x", task->ssp_task.cdb[i]); - mv_printk(" ]\n"); - } - - mvs_slot_complete(mvi, slot_idx, 1); - } -} - -static void mvs_phy_disconnected(struct mvs_phy *phy) -{ - phy->phy_attached = 0; - phy->att_dev_info = 0; - phy->att_dev_sas_addr = 0; -} - -static void mvs_work_queue(struct work_struct *work) -{ - struct delayed_work *dw = container_of(work, struct delayed_work, work); - struct mvs_wq *mwq = container_of(dw, 
struct mvs_wq, work_q); - struct mvs_info *mvi = mwq->mvi; - unsigned long flags; - - spin_lock_irqsave(&mvi->lock, flags); - if (mwq->handler & PHY_PLUG_EVENT) { - u32 phy_no = (unsigned long) mwq->data; - struct sas_ha_struct *sas_ha = mvi->sas; - struct mvs_phy *phy = &mvi->phy[phy_no]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - - if (phy->phy_event & PHY_PLUG_OUT) { - u32 tmp; - struct sas_identify_frame *id; - id = (struct sas_identify_frame *)phy->frame_rcvd; - tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no); - phy->phy_event &= ~PHY_PLUG_OUT; - if (!(tmp & PHY_READY_MASK)) { - sas_phy_disconnected(sas_phy); - mvs_phy_disconnected(phy); - sas_ha->notify_phy_event(sas_phy, - PHYE_LOSS_OF_SIGNAL); - mv_dprintk("phy%d Removed Device\n", phy_no); - } else { - MVS_CHIP_DISP->detect_porttype(mvi, phy_no); - mvs_update_phyinfo(mvi, phy_no, 1); - mvs_bytes_dmaed(mvi, phy_no); - mvs_port_notify_formed(sas_phy, 0); - mv_dprintk("phy%d Attached Device\n", phy_no); - } - } - } - list_del(&mwq->entry); - spin_unlock_irqrestore(&mvi->lock, flags); - kfree(mwq); -} - -static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler) -{ - struct mvs_wq *mwq; - int ret = 0; - - mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC); - if (mwq) { - mwq->mvi = mvi; - mwq->data = data; - mwq->handler = handler; - MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq); - list_add_tail(&mwq->entry, &mvi->wq_list); - schedule_delayed_work(&mwq->work_q, HZ * 2); - } else - ret = -ENOMEM; - - return ret; -} - -static void mvs_sig_time_out(unsigned long tphy) -{ - struct mvs_phy *phy = (struct mvs_phy *)tphy; - struct mvs_info *mvi = phy->mvi; - u8 phy_no; - - for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) { - if (&mvi->phy[phy_no] == phy) { - mv_dprintk("Get signature time out, reset phy %d\n", - phy_no+mvi->id*mvi->chip->n_phy); - MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1); - } - } -} - -static void mvs_sig_remove_timer(struct mvs_phy *phy) -{ - if (phy->timer.function) - del_timer(&phy->timer); - phy->timer.function = NULL; -} - -void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) -{ - u32 tmp; - struct sas_ha_struct *sas_ha = mvi->sas; - struct mvs_phy *phy = &mvi->phy[phy_no]; - struct asd_sas_phy *sas_phy = &phy->sas_phy; - - phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no); - mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy, - MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no)); - mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy, - phy->irq_status); - - /* - * events is port event now , - * we need check the interrupt status which belongs to per port. 
- */ - - if (phy->irq_status & PHYEV_DCDR_ERR) - mv_dprintk("port %d STP decoding error.\n", - phy_no+mvi->id*mvi->chip->n_phy); - - if (phy->irq_status & PHYEV_POOF) { - if (!(phy->phy_event & PHY_PLUG_OUT)) { - int dev_sata = phy->phy_type & PORT_TYPE_SATA; - int ready; - mvs_release_task(mvi, phy_no, NULL); - phy->phy_event |= PHY_PLUG_OUT; - mvs_handle_event(mvi, - (void *)(unsigned long)phy_no, - PHY_PLUG_EVENT); - ready = mvs_is_phy_ready(mvi, phy_no); - if (!ready) - mv_dprintk("phy%d Unplug Notice\n", - phy_no + - mvi->id * mvi->chip->n_phy); - if (ready || dev_sata) { - if (MVS_CHIP_DISP->stp_reset) - MVS_CHIP_DISP->stp_reset(mvi, - phy_no); - else - MVS_CHIP_DISP->phy_reset(mvi, - phy_no, 0); - return; - } - } - } - - if (phy->irq_status & PHYEV_COMWAKE) { - tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no); - MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no, - tmp | PHYEV_SIG_FIS); - if (phy->timer.function == NULL) { - phy->timer.data = (unsigned long)phy; - phy->timer.function = mvs_sig_time_out; - phy->timer.expires = jiffies + 10*HZ; - add_timer(&phy->timer); - } - } - if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { - phy->phy_status = mvs_is_phy_ready(mvi, phy_no); - mvs_sig_remove_timer(phy); - mv_dprintk("notify plug in on phy[%d]\n", phy_no); - if (phy->phy_status) { - mdelay(10); - MVS_CHIP_DISP->detect_porttype(mvi, phy_no); - if (phy->phy_type & PORT_TYPE_SATA) { - tmp = MVS_CHIP_DISP->read_port_irq_mask( - mvi, phy_no); - tmp &= ~PHYEV_SIG_FIS; - MVS_CHIP_DISP->write_port_irq_mask(mvi, - phy_no, tmp); - } - mvs_update_phyinfo(mvi, phy_no, 0); - mvs_bytes_dmaed(mvi, phy_no); - /* whether driver is going to handle hot plug */ - if (phy->phy_event & PHY_PLUG_OUT) { - mvs_port_notify_formed(sas_phy, 0); - phy->phy_event &= ~PHY_PLUG_OUT; - } - } else { - mv_dprintk("plugin interrupt but phy%d is gone\n", - phy_no + mvi->id*mvi->chip->n_phy); - } - } else if (phy->irq_status & PHYEV_BROAD_CH) { - mv_dprintk("port %d broadcast change.\n", - phy_no + mvi->id*mvi->chip->n_phy); - /* exception for Samsung disk drive*/ - mdelay(1000); - sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); - } - MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status); -} - -int mvs_int_rx(struct mvs_info *mvi, bool self_clear) -{ - u32 rx_prod_idx, rx_desc; - bool attn = false; - - /* the first dword in the RX ring is special: it contains - * a mirror of the hardware's RX producer index, so that - * we don't have to stall the CPU reading that register. - * The actual RX ring is offset by one dword, due to this. 
- */ - rx_prod_idx = mvi->rx_cons; - mvi->rx_cons = le32_to_cpu(mvi->rx[0]); - if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ - return 0; - - /* The CMPL_Q may come late, read from register and try again - * note: if coalescing is enabled, - * it will need to read from register every time for sure - */ - if (unlikely(mvi->rx_cons == rx_prod_idx)) - mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK; - - if (mvi->rx_cons == rx_prod_idx) - return 0; - - while (mvi->rx_cons != rx_prod_idx) { - /* increment our internal RX consumer pointer */ - rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); - rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); - - if (likely(rx_desc & RXQ_DONE)) - mvs_slot_complete(mvi, rx_desc, 0); - if (rx_desc & RXQ_ATTN) { - attn = true; - } else if (rx_desc & RXQ_ERR) { - if (!(rx_desc & RXQ_DONE)) - mvs_slot_complete(mvi, rx_desc, 0); - } else if (rx_desc & RXQ_SLOT_RESET) { - mvs_slot_free(mvi, rx_desc); - } - } - - if (attn && self_clear) - MVS_CHIP_DISP->int_full(mvi); - return 0; -} - diff --git a/trunk/drivers/scsi/mvsas/mv_sas.h b/trunk/drivers/scsi/mvsas/mv_sas.h deleted file mode 100644 index aa2270af1bac..000000000000 --- a/trunk/drivers/scsi/mvsas/mv_sas.h +++ /dev/null @@ -1,406 +0,0 @@ -/* - * Marvell 88SE64xx/88SE94xx main function head file - * - * Copyright 2007 Red Hat, Inc. - * Copyright 2008 Marvell. - * - * This file is licensed under GPLv2. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation; version 2 of the - * License. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA -*/ - -#ifndef _MV_SAS_H_ -#define _MV_SAS_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "mv_defs.h" - -#define DRV_NAME "mvsas" -#define DRV_VERSION "0.8.2" -#define _MV_DUMP 0 -#define MVS_ID_NOT_MAPPED 0x7f -/* #define DISABLE_HOTPLUG_DMA_FIX */ -#define MAX_EXP_RUNNING_REQ 2 -#define WIDE_PORT_MAX_PHY 4 -#define MV_DISABLE_NCQ 0 -#define mv_printk(fmt, arg ...) \ - printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg) -#ifdef MV_DEBUG -#define mv_dprintk(format, arg...) \ - printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg) -#else -#define mv_dprintk(format, arg...) 
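The comment inside mvs_int_rx above is worth restating: word 0 of the RX ring mirrors the hardware's producer index so the CPU never has to stall on a register read, and the real descriptors therefore start one dword later. A self-contained model of that consumption loop (RING_SZ and the fill values are illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>

#define RING_SZ 16                 /* illustrative power-of-two size */

int main(void)
{
        uint32_t ring[RING_SZ + 1] = { 0 };  /* +1 for the mirror word */
        uint32_t cons = 0, prod;

        /* pretend hardware completed three descriptors; entry i sits
         * one slot further along because ring[0] holds the mirror */
        ring[2] = 0xA; ring[3] = 0xB; ring[4] = 0xC;
        ring[0] = 3;

        prod = ring[0];              /* read the mirror, not a register */
        while (cons != prod) {
                cons = (cons + 1) & (RING_SZ - 1);
                printf("complete descriptor %#x\n",
                       (unsigned)ring[cons + 1]);
        }
        return 0;
}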
-#endif -#define MV_MAX_U32 0xffffffff - -extern struct mvs_tgt_initiator mvs_tgt; -extern struct mvs_info *tgt_mvi; -extern const struct mvs_dispatch mvs_64xx_dispatch; -extern const struct mvs_dispatch mvs_94xx_dispatch; - -#define DEV_IS_EXPANDER(type) \ - ((type == EDGE_DEV) || (type == FANOUT_DEV)) - -#define bit(n) ((u32)1 << n) - -#define for_each_phy(__lseq_mask, __mc, __lseq) \ - for ((__mc) = (__lseq_mask), (__lseq) = 0; \ - (__mc) != 0 ; \ - (++__lseq), (__mc) >>= 1) - -#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f) -#define UNASSOC_D2H_FIS(id) \ - ((void *) mvi->rx_fis + 0x100 * id) -#define SATA_RECEIVED_FIS_LIST(reg_set) \ - ((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set) -#define SATA_RECEIVED_SDB_FIS(reg_set) \ - (SATA_RECEIVED_FIS_LIST(reg_set) + 0x58) -#define SATA_RECEIVED_D2H_FIS(reg_set) \ - (SATA_RECEIVED_FIS_LIST(reg_set) + 0x40) -#define SATA_RECEIVED_PIO_FIS(reg_set) \ - (SATA_RECEIVED_FIS_LIST(reg_set) + 0x20) -#define SATA_RECEIVED_DMA_FIS(reg_set) \ - (SATA_RECEIVED_FIS_LIST(reg_set) + 0x00) - -enum dev_status { - MVS_DEV_NORMAL = 0x0, - MVS_DEV_EH = 0x1, -}; - - -struct mvs_info; - -struct mvs_dispatch { - char *name; - int (*chip_init)(struct mvs_info *mvi); - int (*spi_init)(struct mvs_info *mvi); - int (*chip_ioremap)(struct mvs_info *mvi); - void (*chip_iounmap)(struct mvs_info *mvi); - irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat); - u32 (*isr_status)(struct mvs_info *mvi, int irq); - void (*interrupt_enable)(struct mvs_info *mvi); - void (*interrupt_disable)(struct mvs_info *mvi); - - u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port); - void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val); - - u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port); - void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val); - void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr); - - u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port); - void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val); - void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr); - - u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port); - void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val); - - u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port); - void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val); - - void (*get_sas_addr)(void *buf, u32 buflen); - void (*command_active)(struct mvs_info *mvi, u32 slot_idx); - void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type, - u32 tfs); - void (*start_delivery)(struct mvs_info *mvi, u32 tx); - u32 (*rx_update)(struct mvs_info *mvi); - void (*int_full)(struct mvs_info *mvi); - u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs); - void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs); - u32 (*prd_size)(void); - u32 (*prd_count)(void); - void (*make_prd)(struct scatterlist *scatter, int nr, void *prd); - void (*detect_porttype)(struct mvs_info *mvi, int i); - int (*oob_done)(struct mvs_info *mvi, int i); - void (*fix_phy_info)(struct mvs_info *mvi, int i, - struct sas_identify_frame *id); - void (*phy_work_around)(struct mvs_info *mvi, int i); - void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id, - struct sas_phy_linkrates *rates); - u32 (*phy_max_link_rate)(void); - void (*phy_disable)(struct mvs_info *mvi, u32 phy_id); - void (*phy_enable)(struct mvs_info *mvi, u32 phy_id); - void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard); - void (*stp_reset)(struct mvs_info *mvi, 
u32 phy_id); - void (*clear_active_cmds)(struct mvs_info *mvi); - u32 (*spi_read_data)(struct mvs_info *mvi); - void (*spi_write_data)(struct mvs_info *mvi, u32 data); - int (*spi_buildcmd)(struct mvs_info *mvi, - u32 *dwCmd, - u8 cmd, - u8 read, - u8 length, - u32 addr - ); - int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd); - int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout); -#ifndef DISABLE_HOTPLUG_DMA_FIX - void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd); -#endif - -}; - -struct mvs_chip_info { - u32 n_host; - u32 n_phy; - u32 fis_offs; - u32 fis_count; - u32 srs_sz; - u32 slot_width; - const struct mvs_dispatch *dispatch; -}; -#define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width) -#define MVS_RX_FISL_SZ \ - (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100)) -#define MVS_CHIP_DISP (mvi->chip->dispatch) - -struct mvs_err_info { - __le32 flags; - __le32 flags2; -}; - -struct mvs_cmd_hdr { - __le32 flags; /* PRD tbl len; SAS, SATA ctl */ - __le32 lens; /* cmd, max resp frame len */ - __le32 tags; /* targ port xfer tag; tag */ - __le32 data_len; /* data xfer len */ - __le64 cmd_tbl; /* command table address */ - __le64 open_frame; /* open addr frame address */ - __le64 status_buf; /* status buffer address */ - __le64 prd_tbl; /* PRD tbl address */ - __le32 reserved[4]; -}; - -struct mvs_port { - struct asd_sas_port sas_port; - u8 port_attached; - u8 wide_port_phymap; - struct list_head list; -}; - -struct mvs_phy { - struct mvs_info *mvi; - struct mvs_port *port; - struct asd_sas_phy sas_phy; - struct sas_identify identify; - struct scsi_device *sdev; - struct timer_list timer; - u64 dev_sas_addr; - u64 att_dev_sas_addr; - u32 att_dev_info; - u32 dev_info; - u32 phy_type; - u32 phy_status; - u32 irq_status; - u32 frame_rcvd_size; - u8 frame_rcvd[32]; - u8 phy_attached; - u8 phy_mode; - u8 reserved[2]; - u32 phy_event; - enum sas_linkrate minimum_linkrate; - enum sas_linkrate maximum_linkrate; -}; - -struct mvs_device { - struct list_head dev_entry; - enum sas_dev_type dev_type; - struct mvs_info *mvi_info; - struct domain_device *sas_device; - u32 attached_phy; - u32 device_id; - u32 runing_req; - u8 taskfileset; - u8 dev_status; - u16 reserved; -}; - -struct mvs_slot_info { - struct list_head entry; - union { - struct sas_task *task; - void *tdata; - }; - u32 n_elem; - u32 tx; - u32 slot_tag; - - /* DMA buffer for storing cmd tbl, open addr frame, status buffer, - * and PRD table - */ - void *buf; - dma_addr_t buf_dma; -#if _MV_DUMP - u32 cmd_size; -#endif - void *response; - struct mvs_port *port; - struct mvs_device *device; - void *open_frame; -}; - -struct mvs_info { - unsigned long flags; - - /* host-wide lock */ - spinlock_t lock; - - /* our device */ - struct pci_dev *pdev; - struct device *dev; - - /* enhanced mode registers */ - void __iomem *regs; - - /* peripheral or soc registers */ - void __iomem *regs_ex; - u8 sas_addr[SAS_ADDR_SIZE]; - - /* SCSI/SAS glue */ - struct sas_ha_struct *sas; - struct Scsi_Host *shost; - - /* TX (delivery) DMA ring */ - __le32 *tx; - dma_addr_t tx_dma; - - /* cached next-producer idx */ - u32 tx_prod; - - /* RX (completion) DMA ring */ - __le32 *rx; - dma_addr_t rx_dma; - - /* RX consumer idx */ - u32 rx_cons; - - /* RX'd FIS area */ - __le32 *rx_fis; - dma_addr_t rx_fis_dma; - - /* DMA command header slots */ - struct mvs_cmd_hdr *slot; - dma_addr_t slot_dma; - - u32 chip_id; - const struct mvs_chip_info *chip; - - int tags_num; - DECLARE_BITMAP(tags, MVS_SLOTS); - /* further per-slot information */ - 
struct mvs_phy phy[MVS_MAX_PHYS]; - struct mvs_port port[MVS_MAX_PHYS]; - u32 irq; - u32 exp_req; - u32 id; - u64 sata_reg_set; - struct list_head *hba_list; - struct list_head soc_entry; - struct list_head wq_list; - unsigned long instance; - u16 flashid; - u32 flashsize; - u32 flashsectSize; - - void *addon; - struct mvs_device devices[MVS_MAX_DEVICES]; -#ifndef DISABLE_HOTPLUG_DMA_FIX - void *bulk_buffer; - dma_addr_t bulk_buffer_dma; -#define TRASH_BUCKET_SIZE 0x20000 -#endif - struct mvs_slot_info slot_info[0]; -}; - -struct mvs_prv_info{ - u8 n_host; - u8 n_phy; - u16 reserve; - struct mvs_info *mvi[2]; -}; - -struct mvs_wq { - struct delayed_work work_q; - struct mvs_info *mvi; - void *data; - int handler; - struct list_head entry; -}; - -struct mvs_task_exec_info { - struct sas_task *task; - struct mvs_cmd_hdr *hdr; - struct mvs_port *port; - u32 tag; - int n_elem; -}; - - -/******************** function prototype *********************/ -void mvs_get_sas_addr(void *buf, u32 buflen); -void mvs_tag_clear(struct mvs_info *mvi, u32 tag); -void mvs_tag_free(struct mvs_info *mvi, u32 tag); -void mvs_tag_set(struct mvs_info *mvi, unsigned int tag); -int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out); -void mvs_tag_init(struct mvs_info *mvi); -void mvs_iounmap(void __iomem *regs); -int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex); -void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard); -int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, - void *funcdata); -void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id, - u32 off_lo, u32 off_hi, u64 sas_addr); -int mvs_slave_alloc(struct scsi_device *scsi_dev); -int mvs_slave_configure(struct scsi_device *sdev); -void mvs_scan_start(struct Scsi_Host *shost); -int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time); -int mvs_queue_command(struct sas_task *task, const int num, - gfp_t gfp_flags); -int mvs_abort_task(struct sas_task *task); -int mvs_abort_task_set(struct domain_device *dev, u8 *lun); -int mvs_clear_aca(struct domain_device *dev, u8 *lun); -int mvs_clear_task_set(struct domain_device *dev, u8 * lun); -void mvs_port_formed(struct asd_sas_phy *sas_phy); -void mvs_port_deformed(struct asd_sas_phy *sas_phy); -int mvs_dev_found(struct domain_device *dev); -void mvs_dev_gone(struct domain_device *dev); -int mvs_lu_reset(struct domain_device *dev, u8 *lun); -int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags); -int mvs_I_T_nexus_reset(struct domain_device *dev); -int mvs_query_task(struct sas_task *task); -void mvs_release_task(struct mvs_info *mvi, int phy_no, - struct domain_device *dev); -void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events); -void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st); -int mvs_int_rx(struct mvs_info *mvi, bool self_clear); -void mvs_hexdump(u32 size, u8 *data, u32 baseaddr); -#endif - diff --git a/trunk/drivers/scsi/osd/Kbuild b/trunk/drivers/scsi/osd/Kbuild index 5fd73d77c3af..0e207aa67d16 100644 --- a/trunk/drivers/scsi/osd/Kbuild +++ b/trunk/drivers/scsi/osd/Kbuild @@ -11,6 +11,31 @@ # it under the terms of the GNU General Public License version 2 # +ifneq ($(OSD_INC),) +# we are built out-of-tree; configure everything as on + +CONFIG_SCSI_OSD_INITIATOR=m +ccflags-y += -DCONFIG_SCSI_OSD_INITIATOR -DCONFIG_SCSI_OSD_INITIATOR_MODULE + +CONFIG_SCSI_OSD_ULD=m +ccflags-y += -DCONFIG_SCSI_OSD_ULD -DCONFIG_SCSI_OSD_ULD_MODULE + +# CONFIG_SCSI_OSD_DPRINT_SENSE = +# 0 - no print of errors +# 1 - print errors +# 2 -
errors + warnings +ccflags-y += -DCONFIG_SCSI_OSD_DPRINT_SENSE=1 + +# Uncomment to turn debug on +# ccflags-y += -DCONFIG_SCSI_OSD_DEBUG + +# if we are built out-of-tree and the hosting kernel has OSD headers +# then "ccflags-y +=" will not pick up the out-of-tree headers. Only by +# prepending to LINUXINCLUDE does it work. This might break in future kernels +LINUXINCLUDE := -I$(OSD_INC) $(LINUXINCLUDE) + +endif + # libosd.ko - osd-initiator library libosd-y := osd_initiator.o obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o diff --git a/trunk/drivers/scsi/osd/Makefile b/trunk/drivers/scsi/osd/Makefile new file mode 100755 index 000000000000..d905344f83ba --- /dev/null +++ b/trunk/drivers/scsi/osd/Makefile @@ -0,0 +1,37 @@ +# +# Makefile for the OSD modules (out of tree) +# +# Copyright (C) 2008 Panasas Inc. All rights reserved. +# +# Authors: +# Boaz Harrosh +# Benny Halevy +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 +# +# This Makefile is used to call the kernel Makefile in case of an out-of-tree +# build. +# $KSRC should point to a Kernel source tree; otherwise the host's default is +# used. (e.g. /lib/modules/`uname -r`/build) + +# include path for out-of-tree headers +OSD_INC ?= `pwd`/../../../include + +# allow users to override these +# e.g. to compile for a kernel that you aren't currently running +KSRC ?= /lib/modules/$(shell uname -r)/build +KBUILD_OUTPUT ?= +ARCH ?= +V ?= 0 + +# this is the basic Kbuild out-of-tree invocation, with the M= option +KBUILD_BASE = +$(MAKE) -C $(KSRC) M=`pwd` KBUILD_OUTPUT=$(KBUILD_OUTPUT) ARCH=$(ARCH) V=$(V) + +all: libosd + +libosd: ; + $(KBUILD_BASE) OSD_INC=$(OSD_INC) modules + +clean: + $(KBUILD_BASE) clean diff --git a/trunk/drivers/scsi/osd/osd_initiator.c b/trunk/drivers/scsi/osd/osd_initiator.c index 7a117c18114c..5776b2ab6b12 100644 --- a/trunk/drivers/scsi/osd/osd_initiator.c +++ b/trunk/drivers/scsi/osd/osd_initiator.c @@ -118,39 +118,39 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps) _osd_ver_desc(or)); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("VENDOR_IDENTIFICATION [%s]\n", + OSD_INFO("OSD_ATTR_RI_VENDOR_IDENTIFICATION [%s]\n", (char *)pFirst); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n", + OSD_INFO("OSD_ATTR_RI_PRODUCT_IDENTIFICATION [%s]\n", (char *)pFirst); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("PRODUCT_MODEL [%s]\n", + OSD_INFO("OSD_ATTR_RI_PRODUCT_MODEL [%s]\n", (char *)pFirst); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n", + OSD_INFO("OSD_ATTR_RI_PRODUCT_REVISION_LEVEL [%u]\n", pFirst ? get_unaligned_be32(pFirst) : ~0U); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("PRODUCT_SERIAL_NUMBER [%s]\n", + OSD_INFO("OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER [%s]\n", (char *)pFirst); pFirst = get_attrs[a].val_ptr; - OSD_INFO("OSD_NAME [%s]\n", (char *)pFirst); + OSD_INFO("OSD_ATTR_RI_OSD_NAME [%s]\n", (char *)pFirst); a++; pFirst = get_attrs[a++].val_ptr; - OSD_INFO("TOTAL_CAPACITY [0x%llx]\n", + OSD_INFO("OSD_ATTR_RI_TOTAL_CAPACITY [0x%llx]\n", pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("USED_CAPACITY [0x%llx]\n", + OSD_INFO("OSD_ATTR_RI_USED_CAPACITY [0x%llx]\n", pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL); pFirst = get_attrs[a++].val_ptr; - OSD_INFO("NUMBER_OF_PARTITIONS [%llu]\n", + OSD_INFO("OSD_ATTR_RI_NUMBER_OF_PARTITIONS [%llu]\n", pFirst ?
_LLU(get_unaligned_be64(pFirst)) : ~0ULL); if (a >= nelem) @@ -158,7 +158,7 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps) /* FIXME: Where are the time utilities */ pFirst = get_attrs[a++].val_ptr; - OSD_INFO("CLOCK [0x%02x%02x%02x%02x%02x%02x]\n", + OSD_INFO("OSD_ATTR_RI_CLOCK [0x%02x%02x%02x%02x%02x%02x]\n", ((char *)pFirst)[0], ((char *)pFirst)[1], ((char *)pFirst)[2], ((char *)pFirst)[3], ((char *)pFirst)[4], ((char *)pFirst)[5]); @@ -169,8 +169,7 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps) hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1, sid_dump, sizeof(sid_dump), true); - OSD_INFO("OSD_SYSTEM_ID(%d)\n" - " [%s]\n", len, sid_dump); + OSD_INFO("OSD_ATTR_RI_OSD_SYSTEM_ID(%d) [%s]\n", len, sid_dump); a++; } out: @@ -670,7 +669,7 @@ static int _osd_req_list_objects(struct osd_request *or, __be16 action, const struct osd_obj_id *obj, osd_id initial_id, struct osd_obj_id_list *list, unsigned nelem) { - struct request_queue *q = osd_request_queue(or->osd_dev); + struct request_queue *q = or->osd_dev->scsi_device->request_queue; u64 len = nelem * sizeof(osd_id) + sizeof(*list); struct bio *bio; @@ -779,32 +778,16 @@ EXPORT_SYMBOL(osd_req_remove_object); */ void osd_req_write(struct osd_request *or, - const struct osd_obj_id *obj, u64 offset, - struct bio *bio, u64 len) + const struct osd_obj_id *obj, struct bio *bio, u64 offset) { - _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len); + _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, bio->bi_size); WARN_ON(or->out.bio || or->out.total_bytes); - WARN_ON(0 == bio_rw_flagged(bio, BIO_RW)); + bio->bi_rw |= (1 << BIO_RW); or->out.bio = bio; - or->out.total_bytes = len; + or->out.total_bytes = bio->bi_size; } EXPORT_SYMBOL(osd_req_write); -int osd_req_write_kern(struct osd_request *or, - const struct osd_obj_id *obj, u64 offset, void* buff, u64 len) -{ - struct request_queue *req_q = osd_request_queue(or->osd_dev); - struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL); - - if (IS_ERR(bio)) - return PTR_ERR(bio); - - bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */ - osd_req_write(or, obj, offset, bio, len); - return 0; -} -EXPORT_SYMBOL(osd_req_write_kern); - /*TODO: void osd_req_append(struct osd_request *, const struct osd_obj_id *, struct bio *data_out); */ /*TODO: void osd_req_create_write(struct osd_request *, @@ -830,31 +813,16 @@ void osd_req_flush_object(struct osd_request *or, EXPORT_SYMBOL(osd_req_flush_object); void osd_req_read(struct osd_request *or, - const struct osd_obj_id *obj, u64 offset, - struct bio *bio, u64 len) + const struct osd_obj_id *obj, struct bio *bio, u64 offset) { - _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); + _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, bio->bi_size); WARN_ON(or->in.bio || or->in.total_bytes); - WARN_ON(1 == bio_rw_flagged(bio, BIO_RW)); + bio->bi_rw &= ~(1 << BIO_RW); or->in.bio = bio; - or->in.total_bytes = len; + or->in.total_bytes = bio->bi_size; } EXPORT_SYMBOL(osd_req_read); -int osd_req_read_kern(struct osd_request *or, - const struct osd_obj_id *obj, u64 offset, void* buff, u64 len) -{ - struct request_queue *req_q = osd_request_queue(or->osd_dev); - struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL); - - if (IS_ERR(bio)) - return PTR_ERR(bio); - - osd_req_read(or, obj, offset, bio, len); - return 0; -} -EXPORT_SYMBOL(osd_req_read_kern); - void osd_req_get_attributes(struct osd_request *or, const struct osd_obj_id *obj) { @@ -1245,7 +1213,7 @@ static inline void 
osd_sec_parms_set_in_offset(bool is_v1, } static int _osd_req_finalize_data_integrity(struct osd_request *or, - bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key) + bool has_in, bool has_out, const u8 *cap_key) { struct osd_security_parameters *sec_parms = _osd_req_sec_params(or); int ret; @@ -1260,7 +1228,8 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or, }; unsigned pad; - or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes); + or->out_data_integ.data_bytes = cpu_to_be64( + or->out.bio ? or->out.bio->bi_size : 0); or->out_data_integ.set_attributes_bytes = cpu_to_be64( or->set_attr.total_bytes); or->out_data_integ.get_attributes_bytes = cpu_to_be64( @@ -1337,8 +1306,6 @@ static int _init_blk_request(struct osd_request *or, or->request = req; req->cmd_type = REQ_TYPE_BLOCK_PC; - req->cmd_flags |= REQ_QUIET; - req->timeout = or->timeout; req->retries = or->retries; req->sense = or->sense; @@ -1372,7 +1339,6 @@ int osd_finalize_request(struct osd_request *or, { struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); bool has_in, has_out; - u64 out_data_bytes = or->out.total_bytes; int ret; if (options & OSD_REQ_FUA) @@ -1422,8 +1388,7 @@ int osd_finalize_request(struct osd_request *or, } } - ret = _osd_req_finalize_data_integrity(or, has_in, has_out, - out_data_bytes, cap_key); + ret = _osd_req_finalize_data_integrity(or, has_in, has_out, cap_key); if (ret) return ret; diff --git a/trunk/drivers/scsi/osd/osd_uld.c b/trunk/drivers/scsi/osd/osd_uld.c index 0bdef3390902..22b59e13ba83 100644 --- a/trunk/drivers/scsi/osd/osd_uld.c +++ b/trunk/drivers/scsi/osd/osd_uld.c @@ -49,7 +49,6 @@ #include #include #include -#include #include #include @@ -176,9 +175,10 @@ static const struct file_operations osd_fops = { struct osd_dev *osduld_path_lookup(const char *name) { - struct osd_uld_device *oud; - struct osd_dev *od; - struct file *file; + struct path path; + struct inode *inode; + struct cdev *cdev; + struct osd_uld_device *uninitialized_var(oud); int error; if (!name || !*name) { @@ -186,46 +186,52 @@ struct osd_dev *osduld_path_lookup(const char *name) return ERR_PTR(-EINVAL); } - od = kzalloc(sizeof(*od), GFP_KERNEL); - if (!od) - return ERR_PTR(-ENOMEM); + error = kern_path(name, LOOKUP_FOLLOW, &path); + if (error) { + OSD_ERR("path_lookup of %s failed=>%d\n", name, error); + return ERR_PTR(error); + } - file = filp_open(name, O_RDWR, 0); - if (IS_ERR(file)) { - error = PTR_ERR(file); - goto free_od; + inode = path.dentry->d_inode; + error = -EINVAL; /* Not the right device e.g osd_uld_device */ + if (!S_ISCHR(inode->i_mode)) { + OSD_DEBUG("!S_ISCHR()\n"); + goto out; } - if (file->f_op != &osd_fops){ - error = -EINVAL; - goto close_file; + cdev = inode->i_cdev; + if (!cdev) { + OSD_ERR("Before mounting an OSD Based filesystem\n"); + OSD_ERR(" user-mode must open+close the %s device\n", name); + OSD_ERR(" Example: bash: echo < %s\n", name); + goto out; } - oud = file->private_data; + /* The Magic wand. Is it our char-dev */ + /* TODO: Support sg devices */ + if (cdev->owner != THIS_MODULE) { + OSD_ERR("Error mounting %s - is not an OSD device\n", name); + goto out; + } - *od = oud->od; - od->file = file; + oud = container_of(cdev, struct osd_uld_device, cdev); - return od; + __uld_get(oud); + error = 0; -close_file: - fput(file); -free_od: - kfree(od); - return ERR_PTR(error); +out: + path_put(&path); + return error ? 
ERR_PTR(error) : &oud->od; } EXPORT_SYMBOL(osduld_path_lookup); void osduld_put_device(struct osd_dev *od) { + if (od) { + struct osd_uld_device *oud = container_of(od, + struct osd_uld_device, od); - if (od && !IS_ERR(od)) { - struct osd_uld_device *oud = od->file->private_data; - - BUG_ON(od->scsi_device != oud->od.scsi_device); - - fput(od->file); - kfree(od); + __uld_put(oud); } } EXPORT_SYMBOL(osduld_put_device); diff --git a/trunk/drivers/scsi/qla1280.c b/trunk/drivers/scsi/qla1280.c index 8371d917a9a2..5defe5ea5eda 100644 --- a/trunk/drivers/scsi/qla1280.c +++ b/trunk/drivers/scsi/qla1280.c @@ -17,12 +17,9 @@ * General Public License for more details. * ******************************************************************************/ -#define QLA1280_VERSION "3.27" +#define QLA1280_VERSION "3.26" /***************************************************************************** Revision History: - Rev 3.27, February 10, 2009, Michael Reed - - General code cleanup. - - Improve error recovery. Rev 3.26, January 16, 2006 Jes Sorensen - Ditch all < 2.6 support Rev 3.25.1, February 10, 2005 Christoph Hellwig @@ -438,6 +435,7 @@ static int qla1280_mailbox_command(struct scsi_qla_host *, uint8_t, uint16_t *); static int qla1280_bus_reset(struct scsi_qla_host *, int); static int qla1280_device_reset(struct scsi_qla_host *, int, int); +static int qla1280_abort_device(struct scsi_qla_host *, int, int, int); static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int); static int qla1280_abort_isp(struct scsi_qla_host *); #ifdef QLA_64BIT_PTR @@ -700,7 +698,7 @@ qla1280_info(struct Scsi_Host *host) } /************************************************************************** * qla1280_queuecommand * Queue a command to the controller.
* * Note: @@ -715,14 +713,12 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) { struct Scsi_Host *host = cmd->device->host; struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; - struct srb *sp = (struct srb *)CMD_SP(cmd); + struct srb *sp = (struct srb *)&cmd->SCp; int status; cmd->scsi_done = fn; sp->cmd = cmd; sp->flags = 0; - sp->wait = NULL; - CMD_HANDLE(cmd) = (unsigned char *)NULL; qla1280_print_scsi_cmd(5, cmd); @@ -742,11 +738,21 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) enum action { ABORT_COMMAND, + ABORT_DEVICE, DEVICE_RESET, BUS_RESET, ADAPTER_RESET, + FAIL }; +/* timer action for error action processor */ +static void qla1280_error_wait_timeout(unsigned long __data) +{ + struct scsi_cmnd *cmd = (struct scsi_cmnd *)__data; + struct srb *sp = (struct srb *)CMD_SP(cmd); + + complete(sp->wait); +} static void qla1280_mailbox_timeout(unsigned long __data) { @@ -761,67 +767,8 @@ static void qla1280_mailbox_timeout(unsigned long __data) complete(ha->mailbox_wait); } -static int -_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp, - struct completion *wait) -{ - int status = FAILED; - struct scsi_cmnd *cmd = sp->cmd; - - spin_unlock_irq(ha->host->host_lock); - wait_for_completion_timeout(wait, 4*HZ); - spin_lock_irq(ha->host->host_lock); - sp->wait = NULL; - if(CMD_HANDLE(cmd) == COMPLETED_HANDLE) { - status = SUCCESS; - (*cmd->scsi_done)(cmd); - } - return status; -} - -static int -qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp) -{ - DECLARE_COMPLETION_ONSTACK(wait); - - sp->wait = &wait; - return _qla1280_wait_for_single_command(ha, sp, &wait); -} - -static int -qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target) -{ - int cnt; - int status; - struct srb *sp; - struct scsi_cmnd *cmd; - - status = SUCCESS; - - /* - * Wait for all commands with the designated bus/target - * to be completed by the firmware - */ - for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { - sp = ha->outstanding_cmds[cnt]; - if (sp) { - cmd = sp->cmd; - - if (bus >= 0 && SCSI_BUS_32(cmd) != bus) - continue; - if (target >= 0 && SCSI_TCN_32(cmd) != target) - continue; - - status = qla1280_wait_for_single_command(ha, sp); - if (status == FAILED) - break; - } - } - return status; -} - /************************************************************************** * qla1280_error_action * The function will attempt to perform a specified error action and * wait for the results (or time out). * @@ -833,6 +780,11 @@ qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target) * Returns: * SUCCESS or FAILED * + * Note: + * Resetting the bus always succeeds - it has to, otherwise the + * kernel will panic! Try a surgical technique - sending a BUS + * DEVICE RESET message - on the offending target before pulling + * the SCSI bus reset line.
**************************************************************************/ static int qla1280_error_action(struct scsi_cmnd *cmd, enum action action) @@ -840,19 +792,13 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action) struct scsi_qla_host *ha; int bus, target, lun; struct srb *sp; - int i, found; - int result=FAILED; - int wait_for_bus=-1; - int wait_for_target = -1; + uint16_t data; + unsigned char *handle; + int result, i; DECLARE_COMPLETION_ONSTACK(wait); - - ENTER("qla1280_error_action"); + struct timer_list timer; ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata); - sp = (struct srb *)CMD_SP(cmd); - bus = SCSI_BUS_32(cmd); - target = SCSI_TCN_32(cmd); - lun = SCSI_LUN_32(cmd); dprintk(4, "error_action %i, istatus 0x%04x\n", action, RD_REG_WORD(&ha->iobase->istatus)); @@ -861,47 +807,99 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action) RD_REG_WORD(&ha->iobase->host_cmd), RD_REG_WORD(&ha->iobase->ictrl), jiffies); + ENTER("qla1280_error_action"); if (qla1280_verbose) printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, " "Handle=0x%p, action=0x%x\n", ha->host_no, cmd, CMD_HANDLE(cmd), action); + if (cmd == NULL) { + printk(KERN_WARNING "(scsi?:?:?:?) Reset called with NULL " + "scsi_cmnd pointer, failing.\n"); + LEAVE("qla1280_error_action"); + return FAILED; + } + + ha = (struct scsi_qla_host *)cmd->device->host->hostdata; + sp = (struct srb *)CMD_SP(cmd); + handle = CMD_HANDLE(cmd); + + /* Check for pending interrupts. */ + data = qla1280_debounce_register(&ha->iobase->istatus); /* - * Check to see if we have the command in the outstanding_cmds[] - * array. If not then it must have completed before this error - * action was initiated. If the error_action isn't ABORT_COMMAND - * then the driver must proceed with the requested action. + * The io_request_lock is held when the reset handler is called, hence + * the interrupt handler cannot be running in parallel as it also + * grabs the lock. /Jes */ - found = -1; - for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) { - if (sp == ha->outstanding_cmds[i]) { - found = i; - sp->wait = &wait; /* we'll wait for it to complete */ - break; - } - } + if (data & RISC_INT) + qla1280_isr(ha, &ha->done_q); - if (found < 0) { /* driver doesn't have command */ - result = SUCCESS; - if (qla1280_verbose) { - printk(KERN_INFO - "scsi(%ld:%d:%d:%d): specified command has " - "already completed.\n", ha->host_no, bus, - target, lun); + /* + * Determine the suggested action that the mid-level driver wants + * us to perform. + */ + if (handle == (unsigned char *)INVALID_HANDLE || handle == NULL) { + if (action == ABORT_COMMAND) { + /* we never got this command */ + printk(KERN_INFO "qla1280: Aborting a NULL handle\n"); + return SUCCESS; /* no action - we don't have command */ } + } else { + sp->wait = &wait; } + bus = SCSI_BUS_32(cmd); + target = SCSI_TCN_32(cmd); + lun = SCSI_LUN_32(cmd); + + /* Overloading result. Here it means the success or fail of the + * *issue* of the action. When we return from the routine, it must + * mean the actual success or fail of the action */ + result = FAILED; switch (action) { + case FAIL: + break; case ABORT_COMMAND: - dprintk(1, "qla1280: RISC aborting command\n"); - /* - * The abort might fail due to race when the host_lock - * is released to issue the abort. As such, we - * don't bother to check the return status.
- */ - if (found >= 0) - qla1280_abort_command(ha, sp, found); + if ((sp->flags & SRB_ABORT_PENDING)) { + printk(KERN_WARNING + "scsi(): Command has a pending abort " + "message - ABORT_PENDING.\n"); + /* This should technically be impossible since we + * now wait for abort completion */ + break; + } + + for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) { + if (sp == ha->outstanding_cmds[i]) { + dprintk(1, "qla1280: RISC aborting command\n"); + if (qla1280_abort_command(ha, sp, i) == 0) + result = SUCCESS; + else { + /* + * Since we don't know what might + * have happened to the command, it + * is unsafe to remove it from the + * device's queue at this point. + * Wait and let the escalation + * process take care of it. + */ + printk(KERN_WARNING + "scsi(%li:%i:%i:%i): Unable" + " to abort command!\n", + ha->host_no, bus, target, lun); + } + } + } + break; + + case ABORT_DEVICE: + if (qla1280_verbose) + printk(KERN_INFO + "scsi(%ld:%d:%d:%d): Queueing abort device " + "command.\n", ha->host_no, bus, target, lun); + if (qla1280_abort_device(ha, bus, target, lun) == 0) + result = SUCCESS; break; case DEVICE_RESET: @@ -909,21 +907,16 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action) printk(KERN_INFO "scsi(%ld:%d:%d:%d): Queueing device reset " "command.\n", ha->host_no, bus, target, lun); - if (qla1280_device_reset(ha, bus, target) == 0) { - /* issued device reset, set wait conditions */ - wait_for_bus = bus; - wait_for_target = target; - } + if (qla1280_device_reset(ha, bus, target) == 0) + result = SUCCESS; break; case BUS_RESET: if (qla1280_verbose) printk(KERN_INFO "qla1280(%ld:%d): Issued bus " "reset.\n", ha->host_no, bus); - if (qla1280_bus_reset(ha, bus) == 0) { - /* issued bus reset, set wait conditions */ - wait_for_bus = bus; - } + if (qla1280_bus_reset(ha, bus) == 0) + result = SUCCESS; break; case ADAPTER_RESET: @@ -936,48 +929,55 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action) "continue automatically\n", ha->host_no); } ha->flags.reset_active = 1; - - if (qla1280_abort_isp(ha) != 0) { /* it's dead */ - result = FAILED; - } + /* + * We restarted all of the commands automatically, so the + * mid-level code can expect completions momentarily. + */ + if (qla1280_abort_isp(ha) == 0) + result = SUCCESS; ha->flags.reset_active = 0; } - /* - * At this point, the host_lock has been released and retaken - * by the issuance of the mailbox command. - * Wait for the command passed in by the mid-layer if it - * was found by the driver. It might have been returned - * between eh recovery steps, hence the check of the "found" - * variable. - */ - - if (found >= 0) - result = _qla1280_wait_for_single_command(ha, sp, &wait); + if (!list_empty(&ha->done_q)) + qla1280_done(ha); - if (action == ABORT_COMMAND && result != SUCCESS) { - printk(KERN_WARNING - "scsi(%li:%i:%i:%i): " - "Unable to abort command!\n", - ha->host_no, bus, target, lun); + /* If we didn't manage to issue the action, or we have no + * command to wait for, exit here */ + if (result == FAILED || handle == NULL || + handle == (unsigned char *)INVALID_HANDLE) { + /* + * Clear completion queue to avoid qla1280_done() trying + * to complete the command at a later stage after we + * have exited the current context + */ + sp->wait = NULL; + goto leave; } - /* - * If the command passed in by the mid-layer has been - * returned by the board, then wait for any additional - * commands which are supposed to complete based upon - * the error action.
- * - * All commands are unconditionally returned during a - * call to qla1280_abort_isp(), ADAPTER_RESET. No need - * to wait for them. - */ - if (result == SUCCESS && wait_for_bus >= 0) { - result = qla1280_wait_for_pending_commands(ha, - wait_for_bus, wait_for_target); + /* set up a timer just in case we're really jammed */ + init_timer(&timer); + timer.expires = jiffies + 4*HZ; + timer.data = (unsigned long)cmd; + timer.function = qla1280_error_wait_timeout; + add_timer(&timer); + + /* wait for the action to complete (or the timer to expire) */ + spin_unlock_irq(ha->host->host_lock); + wait_for_completion(&wait); + del_timer_sync(&timer); + spin_lock_irq(ha->host->host_lock); + sp->wait = NULL; + + /* the only action we might get a fail for is abort */ + if (action == ABORT_COMMAND) { + if(sp->flags & SRB_ABORTED) + result = SUCCESS; + else + result = FAILED; } + leave: dprintk(1, "RESET returning %d\n", result); LEAVE("qla1280_error_action"); @@ -1280,12 +1280,13 @@ qla1280_done(struct scsi_qla_host *ha) switch ((CMD_RESULT(cmd) >> 16)) { case DID_RESET: /* Issue marker command. */ - if (!ha->flags.abort_isp_active) - qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); + qla1280_marker(ha, bus, target, 0, MK_SYNC_ID); break; case DID_ABORT: sp->flags &= ~SRB_ABORT_PENDING; sp->flags |= SRB_ABORTED; + if (sp->flags & SRB_TIMEOUT) + CMD_RESULT(sp->cmd) = DID_TIME_OUT << 16; break; default: break; @@ -1295,11 +1296,12 @@ qla1280_done(struct scsi_qla_host *ha) scsi_dma_unmap(cmd); /* Call the mid-level driver interrupt handler */ + CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE; ha->actthreads--; - if (sp->wait == NULL) - (*(cmd)->scsi_done)(cmd); - else + (*(cmd)->scsi_done)(cmd); + + if(sp->wait != NULL) complete(sp->wait); } LEAVE("qla1280_done"); @@ -2415,6 +2417,9 @@ static int qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) { struct device_reg __iomem *reg = ha->iobase; +#if 0 + LIST_HEAD(done_q); +#endif int status = 0; int cnt; uint16_t *optr, *iptr; @@ -2488,9 +2493,19 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) mr = MAILBOX_REGISTER_COUNT; memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t)); +#if 0 + /* Go check for any response interrupts pending. */ + qla1280_isr(ha, &done_q); +#endif + if (ha->flags.reset_marker) qla1280_rst_aen(ha); +#if 0 + if (!list_empty(&done_q)) + qla1280_done(ha, &done_q); +#endif + if (status) dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = " "0x%x ****\n", mb[0]); @@ -2625,6 +2640,41 @@ qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target) return status; } +/* + * qla1280_abort_device + * Issue an abort message to the device + * + * Input: + * ha = adapter block pointer. + * bus = SCSI BUS. + * target = SCSI ID. + * lun = SCSI LUN. + * + * Returns: + * 0 = success + */ +static int +qla1280_abort_device(struct scsi_qla_host *ha, int bus, int target, int lun) +{ + uint16_t mb[MAILBOX_REGISTER_COUNT]; + int status; + + ENTER("qla1280_abort_device"); + + mb[0] = MBC_ABORT_DEVICE; + mb[1] = (bus ? target | BIT_7 : target) << 8 | lun; + status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]); + + /* Issue marker command. */ + qla1280_marker(ha, bus, target, lun, MK_SYNC_ID_LUN); + + if (status) + dprintk(2, "qla1280_abort_device: **** FAILED ****\n"); + + LEAVE("qla1280_abort_device"); + return status; +} + /* * qla1280_abort_command * Abort command aborts a specified IOCB. 
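For reference, the qla1280_abort_device() helper added above issues MBC_ABORT_DEVICE with the whole bus/target/LUN nexus packed into a single mailbox register. A minimal sketch of that encoding, assuming the BIT_7 definition from qla1280.h; the helper name is hypothetical and only illustrates the packing, it is not part of this patch:

	/*
	 * Pack a SCSI nexus the way qla1280_abort_device() loads mb[1]:
	 * target ID in the high byte (BIT_7 set selects the second bus),
	 * LUN in the low byte. Hypothetical helper for illustration only.
	 */
	static inline uint16_t qla1280_pack_nexus(int bus, int target, int lun)
	{
		uint16_t id = bus ? (target | BIT_7) : target;

		return (id << 8) | lun;
	}

So qla1280_pack_nexus(1, 3, 0) would yield 0x8300, matching the mb[1] = (bus ? target | BIT_7 : target) << 8 | lun expression in the hunk above.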
@@ -2783,7 +2833,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) /* If room for request in request ring. */ if ((req_cnt + 2) >= ha->req_q_cnt) { - status = SCSI_MLQUEUE_HOST_BUSY; + status = 1; dprintk(2, "qla1280_start_scsi: in-ptr=0x%x req_q_cnt=" "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, req_cnt); @@ -2795,7 +2845,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) ha->outstanding_cmds[cnt] != NULL; cnt++); if (cnt >= MAX_OUTSTANDING_COMMANDS) { - status = SCSI_MLQUEUE_HOST_BUSY; + status = 1; dprintk(2, "qla1280_start_scsi: NO ROOM IN " "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt); goto out; @@ -3058,7 +3108,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) ha->req_q_cnt, seg_cnt); /* If room for request in request ring. */ if ((req_cnt + 2) >= ha->req_q_cnt) { - status = SCSI_MLQUEUE_HOST_BUSY; + status = 1; dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, " "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt, req_cnt); @@ -3070,7 +3120,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) (ha->outstanding_cmds[cnt] != 0); cnt++) ; if (cnt >= MAX_OUTSTANDING_COMMANDS) { - status = SCSI_MLQUEUE_HOST_BUSY; + status = 1; dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING " "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt); goto out; @@ -3437,7 +3487,6 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q) /* Save ISP completion status */ CMD_RESULT(sp->cmd) = 0; - CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE; /* Place block on done queue */ list_add_tail(&sp->list, done_q); @@ -3446,7 +3495,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q) * If we get here we have a real problem! */ printk(KERN_WARNING - "qla1280: ISP invalid handle\n"); + "qla1280: ISP invalid handle"); } } break; @@ -3704,8 +3753,6 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt, } } - CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE; - /* Place command on done queue. */ list_add_tail(&sp->list, done_q); out: @@ -3761,8 +3808,6 @@ qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt, CMD_RESULT(sp->cmd) = DID_ERROR << 16; } - CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE; - /* Place command on done queue. 
*/ list_add_tail(&sp->list, done_q); } @@ -3813,16 +3858,19 @@ qla1280_abort_isp(struct scsi_qla_host *ha) struct scsi_cmnd *cmd; sp = ha->outstanding_cmds[cnt]; if (sp) { + cmd = sp->cmd; CMD_RESULT(cmd) = DID_RESET << 16; - CMD_HANDLE(cmd) = COMPLETED_HANDLE; + + sp->cmd = NULL; ha->outstanding_cmds[cnt] = NULL; - list_add_tail(&sp->list, &ha->done_q); + + (*cmd->scsi_done)(cmd); + + sp->flags = 0; } } - qla1280_done(ha); - status = qla1280_load_firmware(ha); if (status) goto out; @@ -3907,6 +3955,13 @@ qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus) if (scsi_control == SCSI_PHASE_INVALID) { ha->bus_settings[bus].scsi_bus_dead = 1; +#if 0 + CMD_RESULT(cp) = DID_NO_CONNECT << 16; + CMD_HANDLE(cp) = INVALID_HANDLE; + /* ha->actthreads--; */ + + (*(cp)->scsi_done)(cp); +#endif return 1; /* bus is dead */ } else { ha->bus_settings[bus].scsi_bus_dead = 0; diff --git a/trunk/drivers/scsi/qla1280.h b/trunk/drivers/scsi/qla1280.h index 834884b9eed5..d7c44b8d2b4f 100644 --- a/trunk/drivers/scsi/qla1280.h +++ b/trunk/drivers/scsi/qla1280.h @@ -88,8 +88,7 @@ /* Maximum outstanding commands in ISP queues */ #define MAX_OUTSTANDING_COMMANDS 512 -#define COMPLETED_HANDLE ((unsigned char *) \ - (MAX_OUTSTANDING_COMMANDS + 2)) +#define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS + 2) /* ISP request and response entry counts (37-65535) */ #define REQUEST_ENTRY_CNT 255 /* Number of request entries. */ diff --git a/trunk/drivers/scsi/qla2xxx/qla_attr.c b/trunk/drivers/scsi/qla2xxx/qla_attr.c index 0f8796201504..b09993a06576 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_attr.c +++ b/trunk/drivers/scsi/qla2xxx/qla_attr.c @@ -97,7 +97,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj, return 0; if (IS_NOCACHE_VPD_TYPE(ha)) - ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2, + ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_nvram << 2, ha->nvram_size); return memory_read_from_buffer(buf, count, &off, ha->nvram, ha->nvram_size); @@ -692,109 +692,6 @@ static struct bin_attribute sysfs_edc_status_attr = { .read = qla2x00_sysfs_read_edc_status, }; -static ssize_t -qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t count) -{ - struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, - struct device, kobj))); - struct qla_hw_data *ha = vha->hw; - int rval; - uint16_t actual_size; - - if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE) - return 0; - - if (ha->xgmac_data) - goto do_read; - - ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE, - &ha->xgmac_data_dma, GFP_KERNEL); - if (!ha->xgmac_data) { - qla_printk(KERN_WARNING, ha, - "Unable to allocate memory for XGMAC read-data.\n"); - return 0; - } - -do_read: - actual_size = 0; - memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE); - - rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma, - XGMAC_DATA_SIZE, &actual_size); - if (rval != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Unable to read XGMAC data (%x).\n", rval); - count = 0; - } - - count = actual_size > count ? 
count: actual_size; - memcpy(buf, ha->xgmac_data, count); - - return count; -} - -static struct bin_attribute sysfs_xgmac_stats_attr = { - .attr = { - .name = "xgmac_stats", - .mode = S_IRUSR, - }, - .size = 0, - .read = qla2x00_sysfs_read_xgmac_stats, -}; - -static ssize_t -qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t off, size_t count) -{ - struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, - struct device, kobj))); - struct qla_hw_data *ha = vha->hw; - int rval; - uint16_t actual_size; - - if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE) - return 0; - - if (ha->dcbx_tlv) - goto do_read; - - ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, - &ha->dcbx_tlv_dma, GFP_KERNEL); - if (!ha->dcbx_tlv) { - qla_printk(KERN_WARNING, ha, - "Unable to allocate memory for DCBX TLV read-data.\n"); - return 0; - } - -do_read: - actual_size = 0; - memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE); - - rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma, - DCBX_TLV_DATA_SIZE); - if (rval != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Unable to read DCBX TLV data (%x).\n", rval); - count = 0; - } - - memcpy(buf, ha->dcbx_tlv, count); - - return count; -} - -static struct bin_attribute sysfs_dcbx_tlv_attr = { - .attr = { - .name = "dcbx_tlv", - .mode = S_IRUSR, - }, - .size = 0, - .read = qla2x00_sysfs_read_dcbx_tlv, -}; - static struct sysfs_entry { char *name; struct bin_attribute *attr; @@ -809,8 +706,6 @@ static struct sysfs_entry { { "reset", &sysfs_reset_attr, }, { "edc", &sysfs_edc_attr, 2 }, { "edc_status", &sysfs_edc_status_attr, 2 }, - { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 }, - { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 }, { NULL }, }; @@ -826,8 +721,6 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha) continue; if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw)) continue; - if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw)) - continue; ret = sysfs_create_bin_file(&host->shost_gendev.kobj, iter->attr); @@ -850,8 +743,6 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha) continue; if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha)) continue; - if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha)) - continue; sysfs_remove_bin_file(&host->shost_gendev.kobj, iter->attr); @@ -1197,58 +1088,6 @@ qla2x00_flash_block_size_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size); } -static ssize_t -qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); - - if (!IS_QLA81XX(vha->hw)) - return snprintf(buf, PAGE_SIZE, "\n"); - - return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id); -} - -static ssize_t -qla2x00_vn_port_mac_address_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); - - if (!IS_QLA81XX(vha->hw)) - return snprintf(buf, PAGE_SIZE, "\n"); - - return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n", - vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4], - vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2], - vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]); -} - -static ssize_t -qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); - - return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap); -} - -static ssize_t -qla2x00_fw_state_show(struct device *dev, struct device_attribute 
*attr, - char *buf) -{ - scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); - int rval; - uint16_t state[5]; - - rval = qla2x00_get_firmware_state(vha, state); - if (rval != QLA_SUCCESS) - memset(state, -1, sizeof(state)); - - return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0], - state[1], state[2], state[3], state[4]); -} - static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); @@ -1277,11 +1116,6 @@ static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL); static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL); static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show, NULL); -static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL); -static DEVICE_ATTR(vn_port_mac_address, S_IRUGO, - qla2x00_vn_port_mac_address_show, NULL); -static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL); -static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL); struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_driver_version, @@ -1304,10 +1138,6 @@ struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_mpi_version, &dev_attr_phy_version, &dev_attr_flash_block_size, - &dev_attr_vlan_id, - &dev_attr_vn_port_mac_address, - &dev_attr_fabric_param, - &dev_attr_fw_state, NULL, }; @@ -1483,8 +1313,7 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) * At this point all fcport's software-states are cleared. Perform any * final cleanup of firmware resources (PCBs and XCBs). */ - if (fcport->loop_id != FC_NO_LOOP_ID && - !test_bit(UNLOADING, &fcport->vha->dpc_flags)) + if (fcport->loop_id != FC_NO_LOOP_ID) fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); @@ -1608,13 +1437,11 @@ static int qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) { int ret = 0; - uint8_t qos = 0; + int cnt = 0; + uint8_t qos = QLA_DEFAULT_QUE_QOS; scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); scsi_qla_host_t *vha = NULL; struct qla_hw_data *ha = base_vha->hw; - uint16_t options = 0; - int cnt; - struct req_que *req = ha->req_q_map[0]; ret = qla24xx_vport_create_req_sanity_check(fc_vport); if (ret) { @@ -1670,39 +1497,23 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) qla24xx_vport_disable(fc_vport, disable); - if (ql2xmultique_tag) { - req = ha->req_q_map[1]; - goto vport_queue; - } else if (ql2xmaxqueues == 1 || !ha->npiv_info) - goto vport_queue; - /* Create a request queue in QoS mode for the vport */ - for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) { - if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0 - && memcmp(ha->npiv_info[cnt].node_name, vha->node_name, - 8) == 0) { - qos = ha->npiv_info[cnt].q_qos; - break; - } - } - if (qos) { - ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0, - qos); - if (!ret) - qla_printk(KERN_WARNING, ha, - "Can't create request queue for vp_idx:%d\n", - vha->vp_idx); - else { - DEBUG2(qla_printk(KERN_INFO, ha, - "Request Que:%d (QoS: %d) created for vp_idx:%d\n", - ret, qos, vha->vp_idx)); - req = ha->req_q_map[ret]; + /* Create a queue pair for the vport */ + if (ha->mqenable) { + if (ha->npiv_info) { + for (; cnt < ha->nvram_npiv_size; cnt++) { + if (ha->npiv_info[cnt].port_name == + vha->port_name && + ha->npiv_info[cnt].node_name == + vha->node_name) { + qos = 
ha->npiv_info[cnt].q_qos; + break; + } + } } + qla25xx_create_queues(vha, qos); } -vport_queue: - vha->req = req; return 0; - vport_create_failed_2: qla24xx_disable_vp(vha); qla24xx_deallocate_vp_id(vha); @@ -1743,8 +1554,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) vha->host_no, vha->vp_idx, vha)); } - if (vha->req->id && !ql2xmultique_tag) { - if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS) + if (ha->mqenable) { + if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS) qla_printk(KERN_WARNING, ha, "Queue delete failed.\n"); } diff --git a/trunk/drivers/scsi/qla2xxx/qla_dbg.c b/trunk/drivers/scsi/qla2xxx/qla_dbg.c index 4a990f4da4ea..34760f8d4f17 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_dbg.c +++ b/trunk/drivers/scsi/qla2xxx/qla_dbg.c @@ -149,9 +149,11 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg) int rval = QLA_SUCCESS; uint32_t cnt; + if (RD_REG_DWORD(®->hccr) & HCCRX_RISC_PAUSE) + return rval; + WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_PAUSE); - for (cnt = 30000; - ((RD_REG_DWORD(®->host_status) & HSRX_RISC_PAUSED) == 0) && + for (cnt = 30000; (RD_REG_DWORD(®->hccr) & HCCRX_RISC_PAUSE) == 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); @@ -349,7 +351,7 @@ static inline void * qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) { uint32_t cnt, que_idx; - uint8_t que_cnt; + uint8_t req_cnt, rsp_cnt, que_cnt; struct qla2xxx_mq_chain *mq = ptr; struct device_reg_25xxmq __iomem *reg; @@ -361,8 +363,9 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) mq->type = __constant_htonl(DUMP_CHAIN_MQ); mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain)); - que_cnt = ha->max_req_queues > ha->max_rsp_queues ? - ha->max_req_queues : ha->max_rsp_queues; + req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues); + rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); + que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt; mq->count = htonl(que_cnt); for (cnt = 0; cnt < que_cnt; cnt++) { reg = (struct device_reg_25xxmq *) ((void *) diff --git a/trunk/drivers/scsi/qla2xxx/qla_def.h b/trunk/drivers/scsi/qla2xxx/qla_def.h index 00aa48d975a6..714ee67567e1 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_def.h +++ b/trunk/drivers/scsi/qla2xxx/qla_def.h @@ -93,7 +93,6 @@ #define LSD(x) ((uint32_t)((uint64_t)(x))) #define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) -#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y))) /* * I/O register @@ -180,7 +179,6 @@ #define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. 
*/ #define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ #define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ -#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/ struct req_que; @@ -188,6 +186,7 @@ struct req_que; * SCSI Request Block */ typedef struct srb { + struct req_que *que; struct fc_port *fcport; struct scsi_cmnd *cmd; /* Linux SCSI command pkt */ @@ -2009,7 +2008,7 @@ typedef struct vport_params { #define VP_RET_CODE_NOT_FOUND 6 struct qla_hw_data; -struct rsp_que; + /* * ISP operations */ @@ -2031,9 +2030,10 @@ struct isp_operations { void (*enable_intrs) (struct qla_hw_data *); void (*disable_intrs) (struct qla_hw_data *); - int (*abort_command) (srb_t *); - int (*target_reset) (struct fc_port *, unsigned int, int); - int (*lun_reset) (struct fc_port *, unsigned int, int); + int (*abort_command) (struct scsi_qla_host *, srb_t *, + struct req_que *); + int (*target_reset) (struct fc_port *, unsigned int); + int (*lun_reset) (struct fc_port *, unsigned int); int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t, uint8_t, uint8_t, uint16_t *, uint8_t); int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t, @@ -2079,6 +2079,7 @@ struct isp_operations { #define QLA_PCI_MSIX_CONTROL 0xa2 struct scsi_qla_host; +struct rsp_que; struct qla_msix_entry { int have_irq; @@ -2139,6 +2140,7 @@ struct qla_statistics { #define MBC_INITIALIZE_MULTIQ 0x1f #define QLA_QUE_PAGE 0X1000 #define QLA_MQ_SIZE 32 +#define QLA_MAX_HOST_QUES 16 #define QLA_MAX_QUEUES 256 #define ISP_QUE_REG(ha, id) \ ((ha->mqenable) ? \ @@ -2168,8 +2170,6 @@ struct rsp_que { struct qla_hw_data *hw; struct qla_msix_entry *msix; struct req_que *req; - srb_t *status_srb; /* status continuation entry */ - struct work_struct q_work; }; /* Request queue data structure */ @@ -2222,8 +2222,6 @@ struct qla_hw_data { uint32_t fce_enabled :1; uint32_t fac_supported :1; uint32_t chip_reset_done :1; - uint32_t port0 :1; - uint32_t running_gold_fw :1; } flags; /* This spinlock is used to protect "io transactions", you must @@ -2248,8 +2246,7 @@ struct qla_hw_data { struct rsp_que **rsp_q_map; unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; - uint8_t max_req_queues; - uint8_t max_rsp_queues; + uint16_t max_queues; struct qla_npiv_entry *npiv_info; uint16_t nvram_npiv_size; @@ -2258,9 +2255,6 @@ struct qla_hw_data { #define FLOGI_MID_SUPPORT BIT_10 #define FLOGI_VSAN_SUPPORT BIT_12 #define FLOGI_SP_SUPPORT BIT_13 - - uint8_t port_no; /* Physical port of adapter */ - /* Timeout timers. 
*/ uint8_t loop_down_abort_time; /* port down timer */ atomic_t loop_down_timer; /* loop down timer */ @@ -2398,14 +2392,6 @@ struct qla_hw_data { dma_addr_t edc_data_dma; uint16_t edc_data_len; -#define XGMAC_DATA_SIZE PAGE_SIZE - void *xgmac_data; - dma_addr_t xgmac_data_dma; - -#define DCBX_TLV_DATA_SIZE PAGE_SIZE - void *dcbx_tlv; - dma_addr_t dcbx_tlv_dma; - struct task_struct *dpc_thread; uint8_t dpc_active; /* DPC routine is active */ @@ -2524,7 +2510,6 @@ struct qla_hw_data { uint32_t flt_region_vpd; uint32_t flt_region_nvram; uint32_t flt_region_npiv_conf; - uint32_t flt_region_gold_fw; /* Needed for BEACON */ uint16_t beacon_blink_led; @@ -2551,7 +2536,6 @@ struct qla_hw_data { struct qla_chip_state_84xx *cs84xx; struct qla_statistics qla_stats; struct isp_operations *isp_ops; - struct workqueue_struct *wq; }; /* @@ -2561,8 +2545,6 @@ typedef struct scsi_qla_host { struct list_head list; struct list_head vp_fcports; /* list of fcports */ struct list_head work_list; - spinlock_t work_lock; - /* Commonly used flags and state information. */ struct Scsi_Host *host; unsigned long host_no; @@ -2609,6 +2591,8 @@ typedef struct scsi_qla_host { #define SWITCH_FOUND BIT_0 #define DFLG_NO_CABLE BIT_1 + srb_t *status_srb; /* Status continuation entry. */ + /* ISP configuration data. */ uint16_t loop_id; /* Host adapter loop id */ @@ -2634,11 +2618,6 @@ typedef struct scsi_qla_host { uint8_t node_name[WWN_SIZE]; uint8_t port_name[WWN_SIZE]; uint8_t fabric_node_name[WWN_SIZE]; - - uint16_t fcoe_vlan_id; - uint16_t fcoe_fcf_idx; - uint8_t fcoe_vn_port_mac[6]; - uint32_t vp_abort_cnt; struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ @@ -2664,7 +2643,7 @@ typedef struct scsi_qla_host { #define VP_ERR_FAB_LOGOUT 4 #define VP_ERR_ADAP_NORESOURCES 5 struct qla_hw_data *hw; - struct req_que *req; + int req_ques[QLA_MAX_HOST_QUES]; } scsi_qla_host_t; /* diff --git a/trunk/drivers/scsi/qla2xxx/qla_fw.h b/trunk/drivers/scsi/qla2xxx/qla_fw.h index dfde2dd865cb..96ccb9642ba0 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_fw.h +++ b/trunk/drivers/scsi/qla2xxx/qla_fw.h @@ -878,6 +878,7 @@ struct device_reg_24xx { /* HCCR statuses. */ #define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */ #define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */ +#define HCCRX_RISC_PAUSE BIT_4 /* RISC Pause mode bit. */ /* HCCR commands. */ /* NOOP. */ #define HCCRX_NOOP 0x00000000 @@ -1240,7 +1241,6 @@ struct qla_flt_header { #define FLT_REG_HW_EVENT_1 0x1f #define FLT_REG_NPIV_CONF_0 0x29 #define FLT_REG_NPIV_CONF_1 0x2a -#define FLT_REG_GOLD_FW 0x2f struct qla_flt_region { uint32_t code; @@ -1405,8 +1405,6 @@ struct access_chip_rsp_84xx { #define MBC_IDC_ACK 0x101 #define MBC_RESTART_MPI_FW 0x3d #define MBC_FLASH_ACCESS_CTRL 0x3e /* Control flash access. 
*/ -#define MBC_GET_XGMAC_STATS 0x7a -#define MBC_GET_DCBX_PARAMS 0x51 /* Flash access control option field bit definitions */ #define FAC_OPT_FORCE_SEMAPHORE BIT_15 @@ -1713,7 +1711,7 @@ struct ex_init_cb_81xx { #define FA_VPD0_ADDR_81 0xD0000 #define FA_VPD1_ADDR_81 0xD0400 #define FA_NVRAM0_ADDR_81 0xD0080 -#define FA_NVRAM1_ADDR_81 0xD0180 +#define FA_NVRAM1_ADDR_81 0xD0480 #define FA_FEATURE_ADDR_81 0xD4000 #define FA_FLASH_DESCR_ADDR_81 0xD8000 #define FA_FLASH_LAYOUT_ADDR_81 0xD8400 diff --git a/trunk/drivers/scsi/qla2xxx/qla_gbl.h b/trunk/drivers/scsi/qla2xxx/qla_gbl.h index 65b12d82867c..528913f6bed9 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_gbl.h +++ b/trunk/drivers/scsi/qla2xxx/qla_gbl.h @@ -65,11 +65,8 @@ extern int ql2xfdmienable; extern int ql2xallocfwdump; extern int ql2xextended_error_logging; extern int ql2xqfullrampup; -extern int ql2xqfulltracking; extern int ql2xiidmaenable; extern int ql2xmaxqueues; -extern int ql2xmultique_tag; -extern int ql2xfwloadbin; extern int qla2x00_loop_reset(scsi_qla_host_t *); extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); @@ -148,7 +145,7 @@ qla2x00_dump_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t); extern int qla2x00_execute_fw(scsi_qla_host_t *, uint32_t); -extern int +extern void qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *, uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *); @@ -168,13 +165,13 @@ extern int qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t); extern int -qla2x00_abort_command(srb_t *); +qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); extern int -qla2x00_abort_target(struct fc_port *, unsigned int, int); +qla2x00_abort_target(struct fc_port *, unsigned int); extern int -qla2x00_lun_reset(struct fc_port *, unsigned int, int); +qla2x00_lun_reset(struct fc_port *, unsigned int); extern int qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *, @@ -239,11 +236,9 @@ extern int qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, dma_addr_t); -extern int qla24xx_abort_command(srb_t *); -extern int -qla24xx_abort_target(struct fc_port *, unsigned int, int); -extern int -qla24xx_lun_reset(struct fc_port *, unsigned int, int); +extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *); +extern int qla24xx_abort_target(struct fc_port *, unsigned int); +extern int qla24xx_lun_reset(struct fc_port *, unsigned int); extern int qla2x00_system_error(scsi_qla_host_t *); @@ -293,18 +288,6 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *, int); extern int qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t); -extern int -qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *); - -extern int -qla2x00_get_dcbx_params(scsi_qla_host_t *, dma_addr_t, uint16_t); - -extern int -qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *); - -extern int -qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t); - /* * Global Function Prototypes in qla_isr.c source file. 
*/ @@ -312,8 +295,8 @@ extern irqreturn_t qla2100_intr_handler(int, void *); extern irqreturn_t qla2300_intr_handler(int, void *); extern irqreturn_t qla24xx_intr_handler(int, void *); extern void qla2x00_process_response_queue(struct rsp_que *); -extern void -qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *); +extern void qla24xx_process_response_queue(struct rsp_que *); + extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *); extern void qla2x00_free_irqs(scsi_qla_host_t *); @@ -418,21 +401,19 @@ extern int qla25xx_request_irq(struct rsp_que *); extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *); extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *); extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t, - uint16_t, int, uint8_t); + uint16_t, uint8_t, uint8_t); extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t, - uint16_t, int); + uint16_t); extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t); extern void qla2x00_init_response_q_entries(struct rsp_que *); extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *); extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *); extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t); -extern int qla25xx_delete_queues(struct scsi_qla_host *); +extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t); extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t); extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t); extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); -extern struct scsi_qla_host * qla25xx_get_host(struct rsp_que *); - #endif /* _QLA_GBL_H */ diff --git a/trunk/drivers/scsi/qla2xxx/qla_gs.c b/trunk/drivers/scsi/qla2xxx/qla_gs.c index 917534b9f221..557f58d5bf88 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_gs.c +++ b/trunk/drivers/scsi/qla2xxx/qla_gs.c @@ -1107,7 +1107,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha) return ret; ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, - mb, BIT_1|BIT_0); + mb, BIT_1); if (mb[0] != MBS_COMMAND_COMPLETE) { DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: " "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n", @@ -1879,9 +1879,6 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) case BIT_13: list[i].fp_speed = PORT_SPEED_4GB; break; - case BIT_12: - list[i].fp_speed = PORT_SPEED_10GB; - break; case BIT_11: list[i].fp_speed = PORT_SPEED_8GB; break; diff --git a/trunk/drivers/scsi/qla2xxx/qla_init.c b/trunk/drivers/scsi/qla2xxx/qla_init.c index 262026129325..bd7dd84c0648 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_init.c +++ b/trunk/drivers/scsi/qla2xxx/qla_init.c @@ -634,7 +634,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) goto chip_diag_failed; DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n", - vha->host_no)); + ha->host_no)); /* Reset RISC processor. 
*/ WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); @@ -655,7 +655,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) goto chip_diag_failed; /* Check product ID of chip */ - DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no)); + DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", ha->host_no)); mb[1] = RD_MAILBOX_REG(ha, reg, 1); mb[2] = RD_MAILBOX_REG(ha, reg, 2); @@ -730,6 +730,9 @@ qla24xx_chip_diag(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; + /* Perform RISC reset. */ + qla24xx_reset_risc(vha); + ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length; rval = qla2x00_mbx_reg_test(vha); @@ -783,6 +786,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) sizeof(uint32_t); if (ha->mqenable) mq_size = sizeof(struct qla2xxx_mq_chain); + /* Allocate memory for Fibre Channel Event Buffer. */ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) goto try_eft; @@ -846,7 +850,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) rsp_q_size = rsp->length * sizeof(response_t); dump_size = offsetof(struct qla2xxx_fw_dump, isp); - dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size; + dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + + eft_size; ha->chain_offset = dump_size; dump_size += mq_size + fce_size; @@ -886,56 +891,6 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) htonl(offsetof(struct qla2xxx_fw_dump, isp)); } -static int -qla81xx_mpi_sync(scsi_qla_host_t *vha) -{ -#define MPS_MASK 0xe0 - int rval; - uint16_t dc; - uint32_t dw; - struct qla_hw_data *ha = vha->hw; - - if (!IS_QLA81XX(vha->hw)) - return QLA_SUCCESS; - - rval = qla2x00_write_ram_word(vha, 0x7c00, 1); - if (rval != QLA_SUCCESS) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "Sync-MPI: Unable to acquire semaphore.\n")); - goto done; - } - - pci_read_config_word(vha->hw->pdev, 0x54, &dc); - rval = qla2x00_read_ram_word(vha, 0x7a15, &dw); - if (rval != QLA_SUCCESS) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "Sync-MPI: Unable to read sync.\n")); - goto done_release; - } - - dc &= MPS_MASK; - if (dc == (dw & MPS_MASK)) - goto done_release; - - dw &= ~MPS_MASK; - dw |= dc; - rval = qla2x00_write_ram_word(vha, 0x7a15, dw); - if (rval != QLA_SUCCESS) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "Sync-MPI: Unable to gain sync.\n")); - } - -done_release: - rval = qla2x00_write_ram_word(vha, 0x7c00, 0); - if (rval != QLA_SUCCESS) { - DEBUG2(qla_printk(KERN_WARNING, ha, - "Sync-MPI: Unable to release semaphore.\n")); - } - -done: - return rval; -} - /** * qla2x00_setup_chip() - Load and start RISC firmware. * @ha: HA context @@ -960,8 +915,6 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) spin_unlock_irqrestore(&ha->hardware_lock, flags); } - qla81xx_mpi_sync(vha); - /* Load firmware sequences */ rval = ha->isp_ops->load_risc(vha, &srisc_address); if (rval == QLA_SUCCESS) { @@ -978,16 +931,13 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) /* Retrieve firmware information. 
*/ if (rval == QLA_SUCCESS) { fw_major_version = ha->fw_major_version; - rval = qla2x00_get_fw_version(vha, + qla2x00_get_fw_version(vha, &ha->fw_major_version, &ha->fw_minor_version, &ha->fw_subminor_version, &ha->fw_attributes, &ha->fw_memory_size, ha->mpi_version, &ha->mpi_capabilities, ha->phy_version); - if (rval != QLA_SUCCESS) - goto failed; - ha->flags.npiv_supported = 0; if (IS_QLA2XXX_MIDTYPE(ha) && (ha->fw_attributes & BIT_2)) { @@ -1039,7 +989,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) ha->fw_subminor_version); } } -failed: + if (rval) { DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n", vha->host_no)); @@ -1063,14 +1013,12 @@ qla2x00_init_response_q_entries(struct rsp_que *rsp) uint16_t cnt; response_t *pkt; - rsp->ring_ptr = rsp->ring; - rsp->ring_index = 0; - rsp->status_srb = NULL; pkt = rsp->ring_ptr; for (cnt = 0; cnt < rsp->length; cnt++) { pkt->signature = RESPONSE_PROCESSED; pkt++; } + } /** @@ -1228,7 +1176,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha) if (ha->flags.msix_enabled) { msix = &ha->msix_entries[1]; DEBUG2_17(printk(KERN_INFO - "Registering vector 0x%x for base que\n", msix->entry)); + "Registering vector 0x%x for base que\n", msix->entry)); icb->msix = cpu_to_le16(msix->entry); } /* Use alternate PCI bus number */ @@ -1282,14 +1230,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha) spin_lock_irqsave(&ha->hardware_lock, flags); /* Clear outstanding commands array. */ - for (que = 0; que < ha->max_req_queues; que++) { + for (que = 0; que < ha->max_queues; que++) { req = ha->req_q_map[que]; if (!req) continue; - for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) + for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) req->outstanding_cmds[cnt] = NULL; - req->current_outstanding_cmd = 1; + req->current_outstanding_cmd = 0; /* Initialize firmware.
*/ req->ring_ptr = req->ring; @@ -1297,10 +1245,13 @@ qla2x00_init_rings(scsi_qla_host_t *vha) req->cnt = req->length; } - for (que = 0; que < ha->max_rsp_queues; que++) { + for (que = 0; que < ha->max_queues; que++) { rsp = ha->rsp_q_map[que]; if (!rsp) continue; + rsp->ring_ptr = rsp->ring; + rsp->ring_index = 0; + /* Initialize response queue entries */ qla2x00_init_response_q_entries(rsp); } @@ -1356,7 +1307,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) unsigned long wtime, mtime, cs84xx_time; uint16_t min_wait; /* Minimum wait time if loop is down */ uint16_t wait_time; /* Wait time if loop is coming ready */ - uint16_t state[5]; + uint16_t state[3]; struct qla_hw_data *ha = vha->hw; rval = QLA_SUCCESS; @@ -1455,9 +1406,8 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) vha->host_no, state[0], jiffies)); } while (1); - DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n", - vha->host_no, state[0], state[1], state[2], state[3], state[4], - jiffies)); + DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n", + vha->host_no, state[0], jiffies)); if (rval) { DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n", @@ -1591,7 +1541,6 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, char *st, *en; uint16_t index; struct qla_hw_data *ha = vha->hw; - int use_tbl = !IS_QLA25XX(ha) && !IS_QLA81XX(ha); if (memcmp(model, BINZERO, len) != 0) { strncpy(ha->model_number, model, len); @@ -1604,16 +1553,14 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, } index = (ha->pdev->subsystem_device & 0xff); - if (use_tbl && - ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && + if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && index < QLA_MODEL_NAMES) strncpy(ha->model_desc, qla2x00_model_name[index * 2 + 1], sizeof(ha->model_desc) - 1); } else { index = (ha->pdev->subsystem_device & 0xff); - if (use_tbl && - ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && + if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && index < QLA_MODEL_NAMES) { strcpy(ha->model_number, qla2x00_model_name[index * 2]); @@ -2114,10 +2061,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { if (test_bit(LOCAL_LOOP_UPDATE, &save_flags)) set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); - if (test_bit(RSCN_UPDATE, &save_flags)) { + if (test_bit(RSCN_UPDATE, &save_flags)) set_bit(RSCN_UPDATE, &vha->dpc_flags); - vha->flags.rscn_queue_overflow = 1; - } } return (rval); @@ -2165,7 +2110,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) goto cleanup_allocation; DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n", - vha->host_no, entries)); + ha->host_no, entries)); DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list, entries * sizeof(struct gid_list_info))); @@ -2298,8 +2243,7 @@ static void qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) { #define LS_UNKNOWN 2 - static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; - char *link_speed; + static char *link_speeds[5] = { "1", "2", "?", "4", "8" }; int rval; uint16_t mb[6]; struct qla_hw_data *ha = vha->hw; @@ -2322,15 +2266,10 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) fcport->port_name[6], fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1])); } else { - link_speed = link_speeds[LS_UNKNOWN]; - if (fcport->fp_speed < 5) - link_speed = link_speeds[fcport->fp_speed]; - else if (fcport->fp_speed == 0x13) - link_speed = link_speeds[5]; DEBUG2(qla_printk(KERN_INFO, ha, "iIDMA adjusted to %s GB/s on " 
"%02x%02x%02x%02x%02x%02x%02x%02x.\n", - link_speed, fcport->port_name[0], + link_speeds[fcport->fp_speed], fcport->port_name[0], fcport->port_name[1], fcport->port_name[2], fcport->port_name[3], fcport->port_name[4], fcport->port_name[5], fcport->port_name[6], @@ -3241,14 +3180,9 @@ qla2x00_loop_resync(scsi_qla_host_t *vha) { int rval = QLA_SUCCESS; uint32_t wait_time; - struct req_que *req; - struct rsp_que *rsp; - - if (ql2xmultique_tag) - req = vha->hw->req_q_map[0]; - else - req = vha->req; - rsp = req->rsp; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[vha->req_ques[0]]; + struct rsp_que *rsp = req->rsp; atomic_set(&vha->loop_state, LOOP_UPDATE); clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); @@ -3514,7 +3448,7 @@ qla25xx_init_queues(struct qla_hw_data *ha) int ret = -1; int i; - for (i = 1; i < ha->max_rsp_queues; i++) { + for (i = 1; i < ha->max_queues; i++) { rsp = ha->rsp_q_map[i]; if (rsp) { rsp->options &= ~BIT_0; @@ -3528,8 +3462,6 @@ qla25xx_init_queues(struct qla_hw_data *ha) "%s Rsp que:%d inited\n", __func__, rsp->id)); } - } - for (i = 1; i < ha->max_req_queues; i++) { req = ha->req_q_map[i]; if (req) { /* Clear outstanding commands array. */ @@ -3634,15 +3566,14 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) nv = ha->nvram; /* Determine NVRAM starting address. */ - if (ha->flags.port0) { - ha->nvram_base = FA_NVRAM_FUNC0_ADDR; - ha->vpd_base = FA_NVRAM_VPD0_ADDR; - } else { + ha->nvram_size = sizeof(struct nvram_24xx); + ha->nvram_base = FA_NVRAM_FUNC0_ADDR; + ha->vpd_size = FA_NVRAM_VPD_SIZE; + ha->vpd_base = FA_NVRAM_VPD0_ADDR; + if (PCI_FUNC(ha->pdev->devfn)) { ha->nvram_base = FA_NVRAM_FUNC1_ADDR; ha->vpd_base = FA_NVRAM_VPD1_ADDR; } - ha->nvram_size = sizeof(struct nvram_24xx); - ha->vpd_size = FA_NVRAM_VPD_SIZE; /* Get VPD data into cache */ ha->vpd = ha->nvram + VPD_OFFSET; @@ -3656,7 +3587,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) chksum += le32_to_cpu(*dptr++); - DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); + DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); /* Bad NVRAM data, set defaults parameters. 
*/ @@ -3681,7 +3612,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) nv->exchange_count = __constant_cpu_to_le16(0); nv->hard_address = __constant_cpu_to_le16(124); nv->port_name[0] = 0x21; - nv->port_name[1] = 0x00 + ha->port_no; + nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); nv->port_name[2] = 0x00; nv->port_name[3] = 0xe0; nv->port_name[4] = 0x8b; @@ -3867,11 +3798,11 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) } static int -qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, - uint32_t faddr) +qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval = QLA_SUCCESS; int segments, fragment; + uint32_t faddr; uint32_t *dcode, dlen; uint32_t risc_addr; uint32_t risc_size; @@ -3880,11 +3811,12 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, struct req_que *req = ha->req_q_map[0]; qla_printk(KERN_INFO, ha, - "FW: Loading from flash (%x)...\n", faddr); + "FW: Loading from flash (%x)...\n", ha->flt_region_fw); rval = QLA_SUCCESS; segments = FA_RISC_CODE_SEGMENTS; + faddr = ha->flt_region_fw; dcode = (uint32_t *)req->ring; *srisc_addr = 0; @@ -4172,9 +4104,6 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval; - if (ql2xfwloadbin == 1) - return qla81xx_load_risc(vha, srisc_addr); - /* * FW Load priority: * 1) Firmware via request-firmware interface (.bin file). @@ -4184,45 +4113,24 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) if (rval == QLA_SUCCESS) return rval; - return qla24xx_load_risc_flash(vha, srisc_addr, - vha->hw->flt_region_fw); + return qla24xx_load_risc_flash(vha, srisc_addr); } int qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval; - struct qla_hw_data *ha = vha->hw; - - if (ql2xfwloadbin == 2) - goto try_blob_fw; /* * FW Load priority: * 1) Firmware residing in flash. * 2) Firmware via request-firmware interface (.bin file). - * 3) Golden-Firmware residing in flash -- limited operation. 
*/ - rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw); + rval = qla24xx_load_risc_flash(vha, srisc_addr); if (rval == QLA_SUCCESS) return rval; -try_blob_fw: - rval = qla24xx_load_risc_blob(vha, srisc_addr); - if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw) - return rval; - - qla_printk(KERN_ERR, ha, - "FW: Attempting to fallback to golden firmware...\n"); - rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); - if (rval != QLA_SUCCESS) - return rval; - - qla_printk(KERN_ERR, ha, - "FW: Please update operational firmware...\n"); - ha->flags.running_gold_fw = 1; - - return rval; + return qla24xx_load_risc_blob(vha, srisc_addr); } void @@ -4238,7 +4146,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) ret = qla2x00_stop_firmware(vha); for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && - ret != QLA_INVALID_COMMAND && retries ; retries--) { + retries ; retries--) { ha->isp_ops->reset_chip(vha); if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS) continue; @@ -4257,19 +4165,13 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha) uint16_t mb[MAILBOX_REGISTER_COUNT]; struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - struct req_que *req; - struct rsp_que *rsp; + struct req_que *req = ha->req_q_map[vha->req_ques[0]]; + struct rsp_que *rsp = req->rsp; if (!vha->vp_idx) return -EINVAL; rval = qla2x00_fw_ready(base_vha); - if (ql2xmultique_tag) - req = ha->req_q_map[0]; - else - req = vha->req; - rsp = req->rsp; - if (rval == QLA_SUCCESS) { clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); @@ -4403,7 +4305,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++) chksum += le32_to_cpu(*dptr++); - DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no)); + DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no)); DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size)); /* Bad NVRAM data, set defaults parameters. */ @@ -4427,7 +4329,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); nv->exchange_count = __constant_cpu_to_le16(0); nv->port_name[0] = 0x21; - nv->port_name[1] = 0x00 + ha->port_no; + nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn); nv->port_name[2] = 0x00; nv->port_name[3] = 0xe0; nv->port_name[4] = 0x8b; @@ -4456,12 +4358,12 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) nv->max_luns_per_target = __constant_cpu_to_le16(128); nv->port_down_retry_count = __constant_cpu_to_le16(30); nv->link_down_timeout = __constant_cpu_to_le16(30); - nv->enode_mac[0] = 0x00; + nv->enode_mac[0] = 0x01; nv->enode_mac[1] = 0x02; nv->enode_mac[2] = 0x03; nv->enode_mac[3] = 0x04; nv->enode_mac[4] = 0x05; - nv->enode_mac[5] = 0x06 + ha->port_no; + nv->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); rval = 1; } @@ -4494,7 +4396,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) icb->enode_mac[2] = 0x03; icb->enode_mac[3] = 0x04; icb->enode_mac[4] = 0x05; - icb->enode_mac[5] = 0x06 + ha->port_no; + icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn); } /* Use extended-initialization control block. 
*/ diff --git a/trunk/drivers/scsi/qla2xxx/qla_iocb.c b/trunk/drivers/scsi/qla2xxx/qla_iocb.c index 13396beae2ce..a8abbb95730d 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_iocb.c +++ b/trunk/drivers/scsi/qla2xxx/qla_iocb.c @@ -15,7 +15,6 @@ static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *, struct rsp_que *rsp); static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *); -static void qla25xx_set_que(srb_t *, struct rsp_que **); /** * qla2x00_get_cmd_direction() - Determine control_flag data direction. * @cmd: SCSI command @@ -93,10 +92,9 @@ qla2x00_calc_iocbs_64(uint16_t dsds) * Returns a pointer to the Continuation Type 0 IOCB packet. */ static inline cont_entry_t * -qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha) +qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha) { cont_entry_t *cont_pkt; - struct req_que *req = vha->req; /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { @@ -122,11 +120,10 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha) * Returns a pointer to the continuation type 1 IOCB packet. */ static inline cont_a64_entry_t * -qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha) +qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha) { cont_a64_entry_t *cont_pkt; - struct req_que *req = vha->req; /* Adjust ring index. */ req->ring_index++; if (req->ring_index == req->length) { @@ -162,6 +159,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, struct scsi_cmnd *cmd; struct scatterlist *sg; int i; + struct req_que *req; cmd = sp->cmd; @@ -176,6 +174,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, } vha = sp->fcport->vha; + req = sp->que; + cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); /* Three DSDs are available in the Command Type 2 IOCB */ @@ -192,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt, * Seven DSDs are available in the Continuation * Type 0 IOCB. */ - cont_pkt = qla2x00_prep_cont_type0_iocb(vha); + cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha); cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address; avail_dsds = 7; } @@ -220,6 +220,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, struct scsi_cmnd *cmd; struct scatterlist *sg; int i; + struct req_que *req; cmd = sp->cmd; @@ -234,6 +235,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, } vha = sp->fcport->vha; + req = sp->que; + cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp)); /* Two DSDs are available in the Command Type 3 IOCB */ @@ -251,7 +254,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, * Five DSDs are available in the Continuation * Type 1 IOCB. 
*/ - cont_pkt = qla2x00_prep_cont_type1_iocb(vha); + cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; avail_dsds = 5; } @@ -350,6 +353,7 @@ qla2x00_start_scsi(srb_t *sp) /* Build command packet */ req->current_outstanding_cmd = handle; req->outstanding_cmds[handle] = sp; + sp->que = req; sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; req->cnt -= req_cnt; @@ -449,7 +453,6 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, mrk24->lun[2] = MSB(lun); host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); mrk24->vp_index = vha->vp_idx; - mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle); } else { SET_TARGET_ID(ha, mrk->target, loop_id); mrk->lun = cpu_to_le16(lun); @@ -528,6 +531,9 @@ qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req, for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++) *dword_ptr++ = 0; + /* Set system defined field. */ + pkt->sys_define = (uint8_t)req->ring_index; + /* Set entry count. */ pkt->entry_count = 1; @@ -650,7 +656,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, } vha = sp->fcport->vha; - req = vha->req; + req = sp->que; /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { @@ -681,7 +687,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, * Five DSDs are available in the Continuation * Type 1 IOCB. */ - cont_pkt = qla2x00_prep_cont_type1_iocb(vha); + cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha); cur_dsd = (uint32_t *)cont_pkt->dseg_0_address; avail_dsds = 5; } @@ -718,13 +724,19 @@ qla24xx_start_scsi(srb_t *sp) struct scsi_cmnd *cmd = sp->cmd; struct scsi_qla_host *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; + uint16_t que_id; /* Setup device pointers. */ ret = 0; + que_id = vha->req_ques[0]; - qla25xx_set_que(sp, &rsp); - req = vha->req; + req = ha->req_q_map[que_id]; + sp->que = req; + if (req->rsp) + rsp = req->rsp; + else + rsp = ha->rsp_q_map[que_id]; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; @@ -782,7 +794,7 @@ qla24xx_start_scsi(srb_t *sp) req->cnt -= req_cnt; cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; - cmd_pkt->handle = MAKE_HANDLE(req->id, handle); + cmd_pkt->handle = handle; /* Zero out remaining portion of packet. */ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ @@ -811,8 +823,6 @@ qla24xx_start_scsi(srb_t *sp) /* Set total data segment count. */ cmd_pkt->entry_count = (uint8_t)req_cnt; - /* Specify response queue number where completion should happen */ - cmd_pkt->entry_status = (uint8_t) rsp->id; wmb(); /* Adjust ring index. */ @@ -832,7 +842,7 @@ qla24xx_start_scsi(srb_t *sp) /* Manage unprocessed RIO/ZIO commands in response queue. 
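(The request and response rings manipulated above are plain circular buffers: qla2x00_prep_cont_type0_iocb(), qla2x00_prep_cont_type1_iocb(), and qla24xx_start_scsi() all advance them with the same wrap-around idiom. A minimal standalone sketch of that idiom, using simplified stand-in types rather than the driver's struct req_que:

#include <stddef.h>
#include <stdint.h>

struct simple_ring {
	void *ring;          /* base address of the ring buffer */
	size_t entry_size;   /* size of one IOCB slot */
	uint16_t length;     /* number of slots in the ring */
	uint16_t ring_index; /* index of the next slot to fill */
	void *ring_ptr;      /* cached pointer to that slot */
};

/* Advance to the next slot, wrapping back to the start of the ring. */
static void *ring_advance(struct simple_ring *q)
{
	q->ring_index++;
	if (q->ring_index == q->length) {
		q->ring_index = 0;
		q->ring_ptr = q->ring;
	} else {
		q->ring_ptr = (char *)q->ring_ptr + q->entry_size;
	}
	return q->ring_ptr;
}

The driver additionally keeps a free-slot count, req->cnt, reset to req->length at ring init, so the submission path can fail fast when the ring is full.)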
*/ if (vha->flags.process_response_queue && rsp->ring_ptr->signature != RESPONSE_PROCESSED) - qla24xx_process_response_queue(vha, rsp); + qla24xx_process_response_queue(rsp); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; @@ -845,16 +855,3 @@ qla24xx_start_scsi(srb_t *sp) return QLA_FUNCTION_FAILED; } - -static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp) -{ - struct scsi_cmnd *cmd = sp->cmd; - struct qla_hw_data *ha = sp->fcport->vha->hw; - int affinity = cmd->request->cpu; - - if (ql2xmultique_tag && affinity >= 0 && - affinity < ha->max_rsp_queues - 1) - *rsp = ha->rsp_q_map[affinity + 1]; - else - *rsp = ha->rsp_q_map[0]; -} diff --git a/trunk/drivers/scsi/qla2xxx/qla_isr.c b/trunk/drivers/scsi/qla2xxx/qla_isr.c index c8d0a176fea4..d04981848e56 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_isr.c +++ b/trunk/drivers/scsi/qla2xxx/qla_isr.c @@ -13,9 +13,10 @@ static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); static void qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *, uint32_t); static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); -static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); +static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *); static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, sts_entry_t *); +static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *); /** * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. @@ -50,7 +51,7 @@ qla2100_intr_handler(int irq, void *dev_id) status = 0; spin_lock(&ha->hardware_lock); - vha = pci_get_drvdata(ha->pdev); + vha = qla2x00_get_rsp_host(rsp); for (iter = 50; iter--; ) { hccr = RD_REG_WORD(&reg->hccr); if (hccr & HCCR_RISC_PAUSE) { @@ -146,7 +147,7 @@ qla2300_intr_handler(int irq, void *dev_id) status = 0; spin_lock(&ha->hardware_lock); - vha = pci_get_drvdata(ha->pdev); + vha = qla2x00_get_rsp_host(rsp); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(&reg->u.isp2300.host_status); if (stat & HSR_RISC_PAUSED) { @@ -684,7 +685,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) vha->host_no)); if (IS_FWI2_CAPABLE(ha)) - qla24xx_process_response_queue(vha, rsp); + qla24xx_process_response_queue(rsp); else qla2x00_process_response_queue(rsp); break; @@ -765,10 +766,7 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data) struct qla_hw_data *ha = vha->hw; struct req_que *req = NULL; - if (!ql2xqfulltracking) - return; - - req = vha->req; + req = ha->req_q_map[vha->req_ques[0]]; if (!req) return; if (req->max_q_depth <= sdev->queue_depth) @@ -810,9 +808,6 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req, fc_port_t *fcport; struct scsi_device *sdev; - if (!ql2xqfulltracking) - return; - sdev = sp->cmd->device; if (sdev->queue_depth >= req->max_q_depth) return; @@ -863,8 +858,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha, qla2x00_ramp_up_queue_depth(vha, req, sp); qla2x00_sp_compl(ha, sp); } else { - DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion" - " handle(%d)\n", vha->host_no, req->id, index)); + DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n", + vha->host_no)); qla_printk(KERN_WARNING, ha, "Invalid ISP SCSI completion handle\n"); @@ -886,7 +881,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp) uint16_t handle_cnt; uint16_t cnt; - vha = pci_get_drvdata(ha->pdev); + vha = qla2x00_get_rsp_host(rsp); if (!vha->flags.online) return; @@ 
-931,7 +926,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp) } break; case STATUS_CONT_TYPE: - qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); + qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); break; default: /* Type Not Supported. */ @@ -950,8 +945,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp) } static inline void -qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len, - struct rsp_que *rsp) +qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len) { struct scsi_cmnd *cp = sp->cmd; @@ -968,7 +962,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len, sp->request_sense_ptr += sense_len; sp->request_sense_length -= sense_len; if (sp->request_sense_length != 0) - rsp->status_srb = sp; + sp->fcport->vha->status_srb = sp; DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no, @@ -998,9 +992,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; uint8_t *rsp_info, *sense_data; struct qla_hw_data *ha = vha->hw; - uint32_t handle; - uint16_t que; - struct req_que *req; + struct req_que *req = rsp->req; sts = (sts_entry_t *) pkt; sts24 = (struct sts_entry_24xx *) pkt; @@ -1011,20 +1003,18 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) comp_status = le16_to_cpu(sts->comp_status); scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; } - handle = (uint32_t) LSW(sts->handle); - que = MSW(sts->handle); - req = ha->req_q_map[que]; + /* Fast path completion. */ if (comp_status == CS_COMPLETE && scsi_status == 0) { - qla2x00_process_completed_request(vha, req, handle); + qla2x00_process_completed_request(vha, req, sts->handle); return; } /* Validate handle. */ - if (handle < MAX_OUTSTANDING_COMMANDS) { - sp = req->outstanding_cmds[handle]; - req->outstanding_cmds[handle] = NULL; + if (sts->handle < MAX_OUTSTANDING_COMMANDS) { + sp = req->outstanding_cmds[sts->handle]; + req->outstanding_cmds[sts->handle] = NULL; } else sp = NULL; @@ -1040,7 +1030,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) cp = sp->cmd; if (cp == NULL) { DEBUG2(printk("scsi(%ld): Command already returned back to OS " - "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp)); + "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp)); qla_printk(KERN_WARNING, ha, "Command is NULL: already returned to OS (sp=%p)\n", sp); @@ -1131,8 +1121,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) scsi_status)); /* Adjust queue depth for all luns on the port. */ - if (!ql2xqfulltracking) - break; fcport->last_queue_full = jiffies; starget_for_each_device(cp->device->sdev_target, fcport, qla2x00_adjust_sdev_qdepth_down); @@ -1145,7 +1133,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) if (!(scsi_status & SS_SENSE_LEN_VALID)) break; - qla2x00_handle_sense(sp, sense_data, sense_len, rsp); + qla2x00_handle_sense(sp, sense_data, sense_len); break; case CS_DATA_UNDERRUN: @@ -1191,8 +1179,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) * Adjust queue depth for all luns on the * port. 
*/ - if (!ql2xqfulltracking) - break; fcport->last_queue_full = jiffies; starget_for_each_device( cp->device->sdev_target, fcport, @@ -1206,12 +1192,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) if (!(scsi_status & SS_SENSE_LEN_VALID)) break; - qla2x00_handle_sense(sp, sense_data, sense_len, rsp); + qla2x00_handle_sense(sp, sense_data, sense_len); } else { /* * If RISC reports underrun and target does not report * it then we must have a lost frame, so tell upper - * layer to retry it by reporting an error. + * layer to retry it by reporting a bus busy. */ if (!(scsi_status & SS_RESIDUAL_UNDER)) { DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " @@ -1221,7 +1207,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) cp->device->id, cp->device->lun, resid, scsi_bufflen(cp))); - cp->result = DID_ERROR << 16; + cp->result = DID_BUS_BUSY << 16; break; } @@ -1348,7 +1334,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) } /* Place command on done queue. */ - if (rsp->status_srb == NULL) + if (vha->status_srb == NULL) qla2x00_sp_compl(ha, sp); } @@ -1360,11 +1346,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) * Extended sense data. */ static void -qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) +qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) { uint8_t sense_sz = 0; - struct qla_hw_data *ha = rsp->hw; - srb_t *sp = rsp->status_srb; + struct qla_hw_data *ha = vha->hw; + srb_t *sp = vha->status_srb; struct scsi_cmnd *cp; if (sp != NULL && sp->request_sense_length != 0) { @@ -1376,7 +1362,7 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) "cmd is NULL: already returned to OS (sp=%p)\n", sp); - rsp->status_srb = NULL; + vha->status_srb = NULL; return; } @@ -1397,7 +1383,7 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) /* Place command on done queue. */ if (sp->request_sense_length == 0) { - rsp->status_srb = NULL; + vha->status_srb = NULL; qla2x00_sp_compl(ha, sp); } } @@ -1413,9 +1399,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) { srb_t *sp; struct qla_hw_data *ha = vha->hw; - uint32_t handle = LSW(pkt->handle); - uint16_t que = MSW(pkt->handle); - struct req_que *req = ha->req_q_map[que]; + struct req_que *req = rsp->req; #if defined(QL_DEBUG_LEVEL_2) if (pkt->entry_status & RF_INV_E_ORDER) qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__); @@ -1433,14 +1417,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) #endif /* Validate handle. */ - if (handle < MAX_OUTSTANDING_COMMANDS) - sp = req->outstanding_cmds[handle]; + if (pkt->handle < MAX_OUTSTANDING_COMMANDS) + sp = req->outstanding_cmds[pkt->handle]; else sp = NULL; if (sp) { /* Free outstanding command slot. */ - req->outstanding_cmds[handle] = NULL; + req->outstanding_cmds[pkt->handle] = NULL; /* Bad payload or header */ if (pkt->entry_status & @@ -1502,10 +1486,13 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) * qla24xx_process_response_queue() - Process response queue entries. 
* @ha: SCSI driver HA context */ -void qla24xx_process_response_queue(struct scsi_qla_host *vha, - struct rsp_que *rsp) +void +qla24xx_process_response_queue(struct rsp_que *rsp) { struct sts_entry_24xx *pkt; + struct scsi_qla_host *vha; + + vha = qla2x00_get_rsp_host(rsp); if (!vha->flags.online) return; @@ -1536,7 +1523,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, qla2x00_status_entry(vha, rsp, pkt); break; case STATUS_CONT_TYPE: - qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); + qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt); break; case VP_RPT_ID_IOCB_TYPE: qla24xx_report_id_acquisition(vha, @@ -1639,7 +1626,7 @@ qla24xx_intr_handler(int irq, void *dev_id) status = 0; spin_lock(&ha->hardware_lock); - vha = pci_get_drvdata(ha->pdev); + vha = qla2x00_get_rsp_host(rsp); for (iter = 50; iter--; ) { stat = RD_REG_DWORD(&reg->host_status); if (stat & HSRX_RISC_PAUSED) { @@ -1677,7 +1664,7 @@ qla24xx_intr_handler(int irq, void *dev_id) break; case 0x13: case 0x14: - qla24xx_process_response_queue(vha, rsp); + qla24xx_process_response_queue(rsp); break; default: DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " @@ -1705,7 +1692,6 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_24xx __iomem *reg; - struct scsi_qla_host *vha; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -1718,8 +1704,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) spin_lock_irq(&ha->hardware_lock); - vha = qla25xx_get_host(rsp); - qla24xx_process_response_queue(vha, rsp); + qla24xx_process_response_queue(rsp); WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); spin_unlock_irq(&ha->hardware_lock); @@ -1732,6 +1717,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) { struct qla_hw_data *ha; struct rsp_que *rsp; + struct device_reg_24xx __iomem *reg; rsp = (struct rsp_que *) dev_id; if (!rsp) { @@ -1740,8 +1726,13 @@ qla25xx_msix_rsp_q(int irq, void *dev_id) return IRQ_NONE; } ha = rsp->hw; + reg = &ha->iobase->isp24; - queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); + spin_lock_irq(&ha->hardware_lock); + + qla24xx_process_response_queue(rsp); + + spin_unlock_irq(&ha->hardware_lock); return IRQ_HANDLED; } @@ -1769,7 +1760,7 @@ qla24xx_msix_default(int irq, void *dev_id) status = 0; spin_lock_irq(&ha->hardware_lock); - vha = pci_get_drvdata(ha->pdev); + vha = qla2x00_get_rsp_host(rsp); do { stat = RD_REG_DWORD(&reg->host_status); if (stat & HSRX_RISC_PAUSED) { @@ -1807,7 +1798,7 @@ qla24xx_msix_default(int irq, void *dev_id) break; case 0x13: case 0x14: - qla24xx_process_response_queue(vha, rsp); + qla24xx_process_response_queue(rsp); break; default: DEBUG2(printk("scsi(%ld): Unrecognized interrupt type " @@ -1831,14 +1822,31 @@ qla24xx_msix_default(int irq, void *dev_id) /* Interrupt handling helpers.
*/ struct qla_init_msix_entry { + uint16_t entry; + uint16_t index; const char *name; irq_handler_t handler; }; -static struct qla_init_msix_entry msix_entries[3] = { - { "qla2xxx (default)", qla24xx_msix_default }, - { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, - { "qla2xxx (multiq)", qla25xx_msix_rsp_q }, +static struct qla_init_msix_entry base_queue = { + .entry = 0, + .index = 0, + .name = "qla2xxx (default)", + .handler = qla24xx_msix_default, +}; + +static struct qla_init_msix_entry base_rsp_queue = { + .entry = 1, + .index = 1, + .name = "qla2xxx (rsp_q)", + .handler = qla24xx_msix_rsp_q, +}; + +static struct qla_init_msix_entry multi_rsp_queue = { + .entry = 1, + .index = 1, + .name = "qla2xxx (multi_q)", + .handler = qla25xx_msix_rsp_q, }; static void @@ -1865,6 +1873,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) int i, ret; struct msix_entry *entries; struct qla_msix_entry *qentry; + struct qla_init_msix_entry *msix_queue; entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, GFP_KERNEL); @@ -1891,7 +1900,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) ha->msix_count, ret); goto msix_out; } - ha->max_rsp_queues = ha->msix_count - 1; + ha->max_queues = ha->msix_count - 1; } ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * ha->msix_count, GFP_KERNEL); @@ -1909,27 +1918,45 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) qentry->rsp = NULL; } - /* Enable MSI-X vectors for the base queue */ - for (i = 0; i < 2; i++) { - qentry = &ha->msix_entries[i]; - ret = request_irq(qentry->vector, msix_entries[i].handler, - 0, msix_entries[i].name, rsp); - if (ret) { - qla_printk(KERN_WARNING, ha, + /* Enable MSI-X for AENs for queue 0 */ + qentry = &ha->msix_entries[0]; + ret = request_irq(qentry->vector, base_queue.handler, 0, + base_queue.name, rsp); + if (ret) { + qla_printk(KERN_WARNING, ha, "MSI-X: Unable to register handler -- %x/%d.\n", qentry->vector, ret); - qla24xx_disable_msix(ha); - ha->mqenable = 0; - goto msix_out; - } - qentry->have_irq = 1; - qentry->rsp = rsp; - rsp->msix = qentry; + qla24xx_disable_msix(ha); + goto msix_out; } + qentry->have_irq = 1; + qentry->rsp = rsp; /* Enable MSI-X vector for response queue update for queue 0 */ - if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) + if (ha->max_queues > 1 && ha->mqiobase) { ha->mqenable = 1; + msix_queue = &multi_rsp_queue; + qla_printk(KERN_INFO, ha, + "MQ enabled, Number of Queue Resources: %d \n", + ha->max_queues); + } else { + ha->mqenable = 0; + msix_queue = &base_rsp_queue; + } + + qentry = &ha->msix_entries[1]; + ret = request_irq(qentry->vector, msix_queue->handler, 0, + msix_queue->name, rsp); + if (ret) { + qla_printk(KERN_WARNING, ha, + "MSI-X: Unable to register handler -- %x/%d.\n", + qentry->vector, ret); + qla24xx_disable_msix(ha); + ha->mqenable = 0; + goto msix_out; + } + qentry->have_irq = 1; + qentry->rsp = rsp; msix_out: kfree(entries); @@ -2036,11 +2063,35 @@ qla2x00_free_irqs(scsi_qla_host_t *vha) } } +static struct scsi_qla_host * +qla2x00_get_rsp_host(struct rsp_que *rsp) +{ + srb_t *sp; + struct qla_hw_data *ha = rsp->hw; + struct scsi_qla_host *vha = NULL; + struct sts_entry_24xx *pkt; + struct req_que *req; + + if (rsp->id) { + pkt = (struct sts_entry_24xx *) rsp->ring_ptr; + req = rsp->req; + if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) { + sp = req->outstanding_cmds[pkt->handle]; + if (sp) + vha = sp->fcport->vha; + } + } + if (!vha) + /* handle it in base queue */ + vha = 
pci_get_drvdata(ha->pdev); + + return vha; +} int qla25xx_request_irq(struct rsp_que *rsp) { struct qla_hw_data *ha = rsp->hw; - struct qla_init_msix_entry *intr = &msix_entries[2]; + struct qla_init_msix_entry *intr = &multi_rsp_queue; struct qla_msix_entry *msix = rsp->msix; int ret; @@ -2055,30 +2106,3 @@ int qla25xx_request_irq(struct rsp_que *rsp) msix->rsp = rsp; return ret; } - -struct scsi_qla_host * -qla25xx_get_host(struct rsp_que *rsp) -{ - srb_t *sp; - struct qla_hw_data *ha = rsp->hw; - struct scsi_qla_host *vha = NULL; - struct sts_entry_24xx *pkt; - struct req_que *req; - uint16_t que; - uint32_t handle; - - pkt = (struct sts_entry_24xx *) rsp->ring_ptr; - que = MSW(pkt->handle); - handle = (uint32_t) LSW(pkt->handle); - req = ha->req_q_map[que]; - if (handle < MAX_OUTSTANDING_COMMANDS) { - sp = req->outstanding_cmds[handle]; - if (sp) - return sp->fcport->vha; - else - goto base_que; - } -base_que: - vha = pci_get_drvdata(ha->pdev); - return vha; -} diff --git a/trunk/drivers/scsi/qla2xxx/qla_mbx.c b/trunk/drivers/scsi/qla2xxx/qla_mbx.c index 451ece0760b0..e67c1660bf46 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_mbx.c +++ b/trunk/drivers/scsi/qla2xxx/qla_mbx.c @@ -408,7 +408,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) * Context: * Kernel context. */ -int +void qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi, uint32_t *mpi_caps, uint8_t *phy) @@ -427,8 +427,6 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, mcp->flags = 0; mcp->tov = MBX_TOV_SECONDS; rval = qla2x00_mailbox_command(vha, mcp); - if (rval != QLA_SUCCESS) - goto failed; /* Return mailbox data. */ *major = mcp->mb[1]; @@ -448,7 +446,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, phy[1] = mcp->mb[9] >> 8; phy[2] = mcp->mb[9] & 0xff; } -failed: + if (rval != QLA_SUCCESS) { /*EMPTY*/ DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, @@ -457,7 +455,6 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, /*EMPTY*/ DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); } - return rval; } /* @@ -751,20 +748,20 @@ qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr, * Kernel context. 
*/ int -qla2x00_abort_command(srb_t *sp) +qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) { unsigned long flags = 0; + fc_port_t *fcport; int rval; uint32_t handle = 0; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - fc_port_t *fcport = sp->fcport; - scsi_qla_host_t *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; - struct req_que *req = vha->req; DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no)); + fcport = sp->fcport; + spin_lock_irqsave(&ha->hardware_lock, flags); for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { if (req->outstanding_cmds[handle] == sp) @@ -803,7 +800,7 @@ qla2x00_abort_command(srb_t *sp) } int -qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag) +qla2x00_abort_target(struct fc_port *fcport, unsigned int l) { int rval, rval2; mbx_cmd_t mc; @@ -816,8 +813,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag) l = l; vha = fcport->vha; - req = vha->hw->req_q_map[tag]; - rsp = vha->hw->rsp_q_map[tag]; + req = vha->hw->req_q_map[0]; + rsp = vha->hw->rsp_q_map[0]; mcp->mb[0] = MBC_ABORT_TARGET; mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0; if (HAS_EXTENDED_IDS(vha->hw)) { @@ -853,7 +850,7 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag) } int -qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag) +qla2x00_lun_reset(struct fc_port *fcport, unsigned int l) { int rval, rval2; mbx_cmd_t mc; @@ -865,8 +862,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag) DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no)); vha = fcport->vha; - req = vha->hw->req_q_map[tag]; - rsp = vha->hw->rsp_q_map[tag]; + req = vha->hw->req_q_map[0]; + rsp = vha->hw->rsp_q_map[0]; mcp->mb[0] = MBC_LUN_RESET; mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; if (HAS_EXTENDED_IDS(vha->hw)) @@ -934,8 +931,6 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_0; mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; - if (IS_QLA81XX(vha->hw)) - mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -957,19 +952,9 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n", vha->host_no, rval)); } else { + /*EMPTY*/ DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n", vha->host_no)); - - if (IS_QLA81XX(vha->hw)) { - vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; - vha->fcoe_fcf_idx = mcp->mb[10]; - vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8; - vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff; - vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8; - vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff; - vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8; - vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff; - } } return rval; @@ -1267,7 +1252,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) mcp->mb[0] = MBC_GET_FIRMWARE_STATE; mcp->out_mb = MBX_0; - mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -1276,8 +1261,6 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) states[0] = mcp->mb[1]; states[1] = mcp->mb[2]; states[2] = mcp->mb[3]; - states[3] = mcp->mb[4]; - states[4] = mcp->mb[5]; if (rval != QLA_SUCCESS) { /*EMPTY*/ @@ -1497,17 +1480,9 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, 
uint16_t loop_id, uint8_t domain, dma_addr_t lg_dma; uint32_t iop[2]; struct qla_hw_data *ha = vha->hw; - struct req_que *req; - struct rsp_que *rsp; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - if (ql2xmultique_tag) - req = ha->req_q_map[0]; - else - req = vha->req; - rsp = req->rsp; - lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); if (lg == NULL) { DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n", @@ -1518,7 +1493,6 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; lg->entry_count = 1; - lg->handle = MAKE_HANDLE(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI); if (opt & BIT_0) @@ -1767,8 +1741,6 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, struct logio_entry_24xx *lg; dma_addr_t lg_dma; struct qla_hw_data *ha = vha->hw; - struct req_que *req; - struct rsp_que *rsp; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); @@ -1780,14 +1752,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, } memset(lg, 0, sizeof(struct logio_entry_24xx)); - if (ql2xmaxqueues > 1) - req = ha->req_q_map[0]; - else - req = vha->req; - rsp = req->rsp; lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; lg->entry_count = 1; - lg->handle = MAKE_HANDLE(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); @@ -1898,6 +1864,9 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; + if (IS_QLA81XX(vha->hw)) + return QLA_SUCCESS; + DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n", vha->host_no)); @@ -2226,21 +2195,21 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, } int -qla24xx_abort_command(srb_t *sp) +qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req) { int rval; + fc_port_t *fcport; unsigned long flags = 0; struct abort_entry_24xx *abt; dma_addr_t abt_dma; uint32_t handle; - fc_port_t *fcport = sp->fcport; - struct scsi_qla_host *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; - struct req_que *req = vha->req; DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); + fcport = sp->fcport; + spin_lock_irqsave(&ha->hardware_lock, flags); for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { if (req->outstanding_cmds[handle] == sp) @@ -2262,7 +2231,6 @@ qla24xx_abort_command(srb_t *sp) abt->entry_type = ABORT_IOCB_TYPE; abt->entry_count = 1; - abt->handle = MAKE_HANDLE(req->id, abt->handle); abt->nport_handle = cpu_to_le16(fcport->loop_id); abt->handle_to_abort = handle; abt->port_id[0] = fcport->d_id.b.al_pa; @@ -2304,7 +2272,7 @@ struct tsk_mgmt_cmd { static int __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, - unsigned int l, int tag) + unsigned int l) { int rval, rval2; struct tsk_mgmt_cmd *tsk; @@ -2318,11 +2286,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, vha = fcport->vha; ha = vha->hw; - req = vha->req; - if (ql2xmultique_tag) - rsp = ha->rsp_q_map[tag + 1]; - else - rsp = req->rsp; + req = ha->req_q_map[0]; + rsp = ha->rsp_q_map[0]; tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma); if (tsk == NULL) { DEBUG2_3(printk("%s(%ld): failed to allocate Task Management " @@ -2333,7 +2298,6 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; 
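(Both qla2x00_abort_command() and qla24xx_abort_command() above recover the firmware handle for an srb by scanning the request queue's outstanding_cmds[] table, the same table the submission paths fill starting from current_outstanding_cmd. A compact sketch of that bookkeeping, with hypothetical helper names and a plain-C table standing in for the driver's struct req_que fields:

#include <stddef.h>
#include <stdint.h>

#define MAX_OUTSTANDING_COMMANDS 1024   /* assumed to match the driver */

struct cmd_table {
	void *slots[MAX_OUTSTANDING_COMMANDS]; /* handle 0 is reserved */
	uint32_t last_handle;                  /* last handle handed out */
};

/* Allocate a free handle, scanning circularly from the last one used. */
static uint32_t cmd_table_alloc(struct cmd_table *t, void *sp)
{
	uint32_t handle = t->last_handle;
	uint32_t i;

	for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;            /* 0 means "slot unused" */
		if (t->slots[handle] == NULL) {
			t->slots[handle] = sp;
			t->last_handle = handle;
			return handle;
		}
	}
	return 0;                              /* table full */
}

/* Find the handle for an srb, as the abort paths do. */
static uint32_t cmd_table_find(struct cmd_table *t, void *sp)
{
	uint32_t handle;

	for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++)
		if (t->slots[handle] == sp)
			return handle;
	return 0;                              /* not outstanding */
}

In the driver both walks happen under hardware_lock, which is why the abort path can safely clear the slot it finds.)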
tsk->p.tsk.entry_count = 1; - tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle); tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); tsk->p.tsk.control_flags = cpu_to_le32(type); @@ -2380,15 +2344,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, } int -qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag) +qla24xx_abort_target(struct fc_port *fcport, unsigned int l) { - return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag); + return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l); } int -qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag) +qla24xx_lun_reset(struct fc_port *fcport, unsigned int l) { - return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag); + return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l); } int @@ -2482,8 +2446,6 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha) if (rval != QLA_SUCCESS) { DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__, vha->host_no, rval)); - if (mcp->mb[0] == MBS_INVALID_COMMAND) - rval = QLA_INVALID_COMMAND; } else { DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); } @@ -2755,11 +2717,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, if (vp_idx == 0) return; - if (MSB(stat) == 1) { - DEBUG2(printk("scsi(%ld): Could not acquire ID for " - "VP[%d].\n", vha->host_no, vp_idx)); + if (MSB(stat) == 1) return; - } list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) if (vp_idx == vp->vp_idx) @@ -3182,8 +3141,6 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) WRT_REG_DWORD(&reg->req_q_in, 0); WRT_REG_DWORD(&reg->req_q_out, 0); } - req->req_q_in = &reg->req_q_in; - req->req_q_out = &reg->req_q_out; spin_unlock_irqrestore(&ha->hardware_lock, flags); rval = qla2x00_mailbox_command(vha, mcp); @@ -3210,6 +3167,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) mcp->mb[6] = MSW(MSD(rsp->dma)); mcp->mb[7] = LSW(MSD(rsp->dma)); mcp->mb[5] = rsp->length; + mcp->mb[11] = rsp->vp_idx; mcp->mb[14] = rsp->msix->entry; mcp->mb[13] = rsp->rid; @@ -3221,7 +3179,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) mcp->mb[8] = 0; /* que out ptr index */ mcp->mb[9] = 0; - mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 + mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_0; mcp->flags = MBX_DMA_OUT; @@ -3426,7 +3384,7 @@ qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr, DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__, vha->host_no, rval, mcp->mb[0])); } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); + DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no)); } return rval; @@ -3470,141 +3428,3 @@ qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr, return rval; } - -int -qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma, - uint16_t size_in_bytes, uint16_t *actual_size) -{ - int rval; - mbx_cmd_t mc; - mbx_cmd_t *mcp = &mc; - - if (!IS_QLA81XX(vha->hw)) - return QLA_FUNCTION_FAILED; - - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - - mcp->mb[0] = MBC_GET_XGMAC_STATS; - mcp->mb[2] = MSW(stats_dma); - mcp->mb[3] = LSW(stats_dma); - mcp->mb[6] = MSW(MSD(stats_dma)); - mcp->mb[7] = LSW(MSD(stats_dma)); - mcp->mb[8] = size_in_bytes >> 2; - mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; - mcp->in_mb = MBX_2|MBX_1|MBX_0; - mcp->tov = MBX_TOV_SECONDS; -
mcp->flags = 0; - rval = qla2x00_mailbox_command(vha, mcp); - - if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x " - "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval, - mcp->mb[0], mcp->mb[1], mcp->mb[2])); - } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); - - *actual_size = mcp->mb[2] << 2; - } - - return rval; -} - -int -qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma, - uint16_t size) -{ - int rval; - mbx_cmd_t mc; - mbx_cmd_t *mcp = &mc; - - if (!IS_QLA81XX(vha->hw)) - return QLA_FUNCTION_FAILED; - - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - - mcp->mb[0] = MBC_GET_DCBX_PARAMS; - mcp->mb[1] = 0; - mcp->mb[2] = MSW(tlv_dma); - mcp->mb[3] = LSW(tlv_dma); - mcp->mb[6] = MSW(MSD(tlv_dma)); - mcp->mb[7] = LSW(MSD(tlv_dma)); - mcp->mb[8] = size; - mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; - mcp->in_mb = MBX_2|MBX_1|MBX_0; - mcp->tov = MBX_TOV_SECONDS; - mcp->flags = 0; - rval = qla2x00_mailbox_command(vha, mcp); - - if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x " - "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval, - mcp->mb[0], mcp->mb[1], mcp->mb[2])); - } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); - } - - return rval; -} - -int -qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) -{ - int rval; - mbx_cmd_t mc; - mbx_cmd_t *mcp = &mc; - - if (!IS_FWI2_CAPABLE(vha->hw)) - return QLA_FUNCTION_FAILED; - - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - - mcp->mb[0] = MBC_READ_RAM_EXTENDED; - mcp->mb[1] = LSW(risc_addr); - mcp->mb[8] = MSW(risc_addr); - mcp->out_mb = MBX_8|MBX_1|MBX_0; - mcp->in_mb = MBX_3|MBX_2|MBX_0; - mcp->tov = 30; - mcp->flags = 0; - rval = qla2x00_mailbox_command(vha, mcp); - if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, - vha->host_no, rval, mcp->mb[0])); - } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); - *data = mcp->mb[3] << 16 | mcp->mb[2]; - } - - return rval; -} - -int -qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) -{ - int rval; - mbx_cmd_t mc; - mbx_cmd_t *mcp = &mc; - - if (!IS_FWI2_CAPABLE(vha->hw)) - return QLA_FUNCTION_FAILED; - - DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no)); - - mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED; - mcp->mb[1] = LSW(risc_addr); - mcp->mb[2] = LSW(data); - mcp->mb[3] = MSW(data); - mcp->mb[8] = MSW(risc_addr); - mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; - mcp->in_mb = MBX_0; - mcp->tov = 30; - mcp->flags = 0; - rval = qla2x00_mailbox_command(vha, mcp); - if (rval != QLA_SUCCESS) { - DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__, - vha->host_no, rval, mcp->mb[0])); - } else { - DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no)); - } - - return rval; -} diff --git a/trunk/drivers/scsi/qla2xxx/qla_mid.c b/trunk/drivers/scsi/qla2xxx/qla_mid.c index 650bcef08f2a..51716c7e3008 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_mid.c +++ b/trunk/drivers/scsi/qla2xxx/qla_mid.c @@ -398,8 +398,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); - vha->req = base_vha->req; - host->can_queue = base_vha->req->length + 128; + memset(vha->req_ques, 0, sizeof(vha->req_ques)); + vha->req_ques[0] = ha->req_q_map[0]->id; + host->can_queue = ha->req_q_map[0]->length + 128; host->this_id = 255; host->cmd_per_lun = 3; 
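(The queue create/delete paths below track queue ids with a bitmap: qla25xx_create_req_que() claims the first zero bit of req_qid_map under vport_lock, and queue deletion makes the id reusable. A minimal single-word approximation of that allocator; the kernel's find_first_zero_bit() works on arbitrarily long bitmaps, and locking is omitted here:

#include <stdint.h>

#define MAX_QUEUES 32                 /* stand-in for ha->max_queues */

static uint32_t qid_map = 1u;         /* bit 0 set: queue 0 is the base queue */

/* Claim the lowest free queue id, or MAX_QUEUES if none is left. */
static int qid_alloc(void)
{
	int id;

	for (id = 0; id < MAX_QUEUES; id++) {
		if (!(qid_map & (1u << id))) {
			qid_map |= 1u << id;
			return id;
		}
	}
	return MAX_QUEUES;            /* "no resources" case below */
}

/* Release a queue id when the queue is deleted. */
static void qid_free(int id)
{
	qid_map &= ~(1u << id);
}
)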
host->max_cmd_len = MAX_CMDSZ; @@ -514,53 +515,76 @@ int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos) /* Delete all queues for a given vhost */ int -qla25xx_delete_queues(struct scsi_qla_host *vha) +qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no) { int cnt, ret = 0; struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct qla_hw_data *ha = vha->hw; - /* Delete request queues */ - for (cnt = 1; cnt < ha->max_req_queues; cnt++) { - req = ha->req_q_map[cnt]; + if (que_no) { + /* Delete request queue */ + req = ha->req_q_map[que_no]; if (req) { + rsp = req->rsp; ret = qla25xx_delete_req_que(vha, req); if (ret != QLA_SUCCESS) { qla_printk(KERN_WARNING, ha, - "Couldn't delete req que %d\n", - req->id); + "Couldn't delete req que %d\n", req->id); return ret; } + /* Delete associated response queue */ + if (rsp) { + ret = qla25xx_delete_rsp_que(vha, rsp); + if (ret != QLA_SUCCESS) { + qla_printk(KERN_WARNING, ha, + "Couldn't delete rsp que %d\n", + rsp->id); + return ret; + } + } } - } - - /* Delete response queues */ - for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { - rsp = ha->rsp_q_map[cnt]; - if (rsp) { - ret = qla25xx_delete_rsp_que(vha, rsp); - if (ret != QLA_SUCCESS) { - qla_printk(KERN_WARNING, ha, - "Couldn't delete rsp que %d\n", - rsp->id); - return ret; + } else { /* delete all queues of this host */ + for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) { + /* Delete request queues */ + req = ha->req_q_map[vha->req_ques[cnt]]; + if (req && req->id) { + rsp = req->rsp; + ret = qla25xx_delete_req_que(vha, req); + if (ret != QLA_SUCCESS) { + qla_printk(KERN_WARNING, ha, + "Couldn't delete req que %d\n", + vha->req_ques[cnt]); + return ret; + } + vha->req_ques[cnt] = ha->req_q_map[0]->id; + /* Delete associated response queue */ + if (rsp && rsp->id) { + ret = qla25xx_delete_rsp_que(vha, rsp); + if (ret != QLA_SUCCESS) { + qla_printk(KERN_WARNING, ha, + "Couldn't delete rsp que %d\n", + rsp->id); + return ret; + } + } } } } + qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n", + vha->vp_idx); return ret; } int qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, - uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos) + uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos) { int ret = 0; struct req_que *req = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); uint16_t que_id = 0; device_reg_t __iomem *reg; - uint32_t cnt; req = kzalloc(sizeof(struct req_que), GFP_KERNEL); if (req == NULL) { @@ -580,8 +604,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, } mutex_lock(&ha->vport_lock); - que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); - if (que_id >= ha->max_req_queues) { + que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues); + if (que_id >= ha->max_queues) { mutex_unlock(&ha->vport_lock); qla_printk(KERN_INFO, ha, "No resources to create " "additional request queue\n"); @@ -593,10 +617,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, req->vp_idx = vp_idx; req->qos = qos; - if (rsp_que < 0) - req->rsp = NULL; - else + if (ha->rsp_q_map[rsp_que]) { req->rsp = ha->rsp_q_map[rsp_que]; + req->rsp->req = req; + } /* Use alternate PCI bus number */ if (MSB(req->rid)) options |= BIT_4; @@ -604,16 +628,13 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, if (LSB(req->rid)) options |= BIT_5; req->options = options; - - for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) - req->outstanding_cmds[cnt] = NULL; - 
req->current_outstanding_cmd = 1; - req->ring_ptr = req->ring; req->ring_index = 0; req->cnt = req->length; req->id = que_id; reg = ISP_QUE_REG(ha, que_id); + req->req_q_in = &reg->isp25mq.req_q_in; + req->req_q_out = &reg->isp25mq.req_q_out; req->max_q_depth = ha->req_q_map[0]->max_q_depth; mutex_unlock(&ha->vport_lock); @@ -633,19 +654,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, return 0; } -static void qla_do_work(struct work_struct *work) -{ - struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); - struct scsi_qla_host *vha; - - vha = qla25xx_get_host(rsp); - qla24xx_process_response_queue(vha, rsp); -} - /* create response queue */ int qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, - uint8_t vp_idx, uint16_t rid, int req) + uint8_t vp_idx, uint16_t rid) { int ret = 0; struct rsp_que *rsp = NULL; @@ -660,7 +672,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, goto que_failed; } - rsp->length = RESPONSE_ENTRY_CNT_MQ; + rsp->length = RESPONSE_ENTRY_CNT_2300; rsp->ring = dma_alloc_coherent(&ha->pdev->dev, (rsp->length + 1) * sizeof(response_t), &rsp->dma, GFP_KERNEL); @@ -671,8 +683,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, } mutex_lock(&ha->vport_lock); - que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues); - if (que_id >= ha->max_rsp_queues) { + que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues); + if (que_id >= ha->max_queues) { mutex_unlock(&ha->vport_lock); qla_printk(KERN_INFO, ha, "No resources to create " "additional response queue\n"); @@ -696,6 +708,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, if (LSB(rsp->rid)) options |= BIT_5; rsp->options = options; + rsp->ring_ptr = rsp->ring; + rsp->ring_index = 0; rsp->id = que_id; reg = ISP_QUE_REG(ha, que_id); rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; @@ -714,14 +728,9 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, mutex_unlock(&ha->vport_lock); goto que_failed; } - if (req >= 0) - rsp->req = ha->req_q_map[req]; - else - rsp->req = NULL; qla2x00_init_response_q_entries(rsp); - if (rsp->hw->wq) - INIT_WORK(&rsp->q_work, qla_do_work); + return rsp->id; que_failed: @@ -735,16 +744,14 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos) uint16_t options = 0; uint8_t ret = 0; struct qla_hw_data *ha = vha->hw; - struct rsp_que *rsp; options |= BIT_1; - ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0, -1); + ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0); if (!ret) { qla_printk(KERN_WARNING, ha, "Response Que create failed\n"); return ret; } else qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret); - rsp = ha->rsp_q_map[ret]; options = 0; if (qos & BIT_7) @@ -752,11 +759,10 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos) ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret, qos & ~BIT_7); if (ret) { - vha->req = ha->req_q_map[ret]; + vha->req_ques[0] = ret; qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret); } else qla_printk(KERN_WARNING, ha, "Request Que create failed\n"); - rsp->req = ha->req_q_map[ret]; return ret; } diff --git a/trunk/drivers/scsi/qla2xxx/qla_os.c b/trunk/drivers/scsi/qla2xxx/qla_os.c index dcf011679c8b..e4fdcdad80d0 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_os.c +++ b/trunk/drivers/scsi/qla2xxx/qla_os.c @@ -77,14 +77,6 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xmaxqdepth, "Maximum queue depth to report for target devices."); -int ql2xqfulltracking = 
1; -module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR); -MODULE_PARM_DESC(ql2xqfulltracking, - "Controls whether the driver tracks queue full status " - "returns and dynamically adjusts a scsi device's queue " - "depth. Default is 1, perform tracking. Set to 0 to " - "disable dynamic tracking and adjustment of queue depth."); - int ql2xqfullrampup = 120; module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xqfullrampup, @@ -104,23 +96,6 @@ MODULE_PARM_DESC(ql2xmaxqueues, "Enables MQ settings " "Default is 1 for single queue. Set it to number \ of queues in MQ mode."); - -int ql2xmultique_tag; -module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR); -MODULE_PARM_DESC(ql2xmultique_tag, - "Enables CPU affinity settings for the driver " - "Default is 0 for no affinity of request and response IO. " - "Set it to 1 to turn on the cpu affinity."); - -int ql2xfwloadbin; -module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR); -MODULE_PARM_DESC(ql2xfwloadbin, - "Option to specify location from which to load ISP firmware:\n" - " 2 -- load firmware via the request_firmware() (hotplug)\n" - " interface.\n" - " 1 -- load firmware from flash.\n" - " 0 -- use default semantics.\n"); - /* * SCSI host template entry points */ @@ -212,7 +187,7 @@ static void qla2x00_sp_free_dma(srb_t *); /* -------------------------------------------------------------------------- */ static int qla2x00_alloc_queues(struct qla_hw_data *ha) { - ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues, + ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues, GFP_KERNEL); if (!ha->req_q_map) { qla_printk(KERN_WARNING, ha, @@ -220,7 +195,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha) goto fail_req_map; } - ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues, + ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues, GFP_KERNEL); if (!ha->rsp_q_map) { qla_printk(KERN_WARNING, ha, @@ -238,18 +213,8 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha) return -ENOMEM; } -static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) -{ - if (req && req->ring) - dma_free_coherent(&ha->pdev->dev, - (req->length + 1) * sizeof(request_t), - req->ring, req->dma); - - kfree(req); - req = NULL; -} - -static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) +static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req, + struct rsp_que *rsp) { if (rsp && rsp->ring) dma_free_coherent(&ha->pdev->dev, @@ -258,6 +223,13 @@ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) kfree(rsp); rsp = NULL; + if (req && req->ring) + dma_free_coherent(&ha->pdev->dev, + (req->length + 1) * sizeof(request_t), + req->ring, req->dma); + + kfree(req); + req = NULL; } static void qla2x00_free_queues(struct qla_hw_data *ha) @@ -266,60 +238,16 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) struct rsp_que *rsp; int cnt; - for (cnt = 0; cnt < ha->max_req_queues; cnt++) { - req = ha->req_q_map[cnt]; - qla2x00_free_req_que(ha, req); - } - kfree(ha->req_q_map); - ha->req_q_map = NULL; - - for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { + for (cnt = 0; cnt < ha->max_queues; cnt++) { rsp = ha->rsp_q_map[cnt]; - qla2x00_free_rsp_que(ha, rsp); + req = ha->req_q_map[cnt]; + qla2x00_free_que(ha, req, rsp); } kfree(ha->rsp_q_map); ha->rsp_q_map = NULL; -} -static int qla25xx_setup_mode(struct scsi_qla_host *vha) -{ - uint16_t options = 0; - int ques, req, ret; - struct qla_hw_data *ha = vha->hw; - - if 
-		/* CPU affinity mode */
-		ha->wq = create_workqueue("qla2xxx_wq");
-		/* create a request queue for IO */
-		options |= BIT_7;
-		req = qla25xx_create_req_que(ha, options, 0, 0, -1,
-			QLA_DEFAULT_QUE_QOS);
-		if (!req) {
-			qla_printk(KERN_WARNING, ha,
-				"Can't create request queue\n");
-			goto fail;
-		}
-		vha->req = ha->req_q_map[req];
-		options |= BIT_1;
-		for (ques = 1; ques < ha->max_rsp_queues; ques++) {
-			ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
-			if (!ret) {
-				qla_printk(KERN_WARNING, ha,
-					"Response Queue create failed\n");
-				goto fail2;
-			}
-		}
-		DEBUG2(qla_printk(KERN_INFO, ha,
-			"CPU affinity mode enabled, no. of response"
-			" queues:%d, no. of request queues:%d\n",
-			ha->max_rsp_queues, ha->max_req_queues));
-	}
-	return 0;
-fail2:
-	qla25xx_delete_queues(vha);
-fail:
-	ha->mqenable = 0;
-	return 1;
+	kfree(ha->req_q_map);
+	ha->req_q_map = NULL;
 }
 
 static char *
@@ -459,6 +387,7 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
 
 	sp->fcport = fcport;
 	sp->cmd = cmd;
+	sp->que = ha->req_q_map[0];
 	sp->flags = 0;
 	CMD_SP(cmd) = (void *)sp;
 	cmd->scsi_done = done;
@@ -683,7 +612,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
 void
 qla2x00_abort_fcport_cmds(fc_port_t *fcport)
 {
-	int cnt;
+	int cnt, que, id;
 	unsigned long flags;
 	srb_t *sp;
 	scsi_qla_host_t *vha = fcport->vha;
@@ -691,27 +620,32 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport)
 	struct req_que *req;
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	req = vha->req;
-	for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
-		sp = req->outstanding_cmds[cnt];
-		if (!sp)
-			continue;
-		if (sp->fcport != fcport)
+	for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
+		id = vha->req_ques[que];
+		req = ha->req_q_map[id];
+		if (!req)
 			continue;
+		for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+			sp = req->outstanding_cmds[cnt];
+			if (!sp)
+				continue;
+			if (sp->fcport != fcport)
+				continue;
 
-		spin_unlock_irqrestore(&ha->hardware_lock, flags);
-		if (ha->isp_ops->abort_command(sp)) {
-			DEBUG2(qla_printk(KERN_WARNING, ha,
-			"Abort failed -- %lx\n",
-				sp->cmd->serial_number));
-		} else {
-			if (qla2x00_eh_wait_on_command(sp->cmd) !=
-				QLA_SUCCESS)
+			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+			if (ha->isp_ops->abort_command(vha, sp, req)) {
 				DEBUG2(qla_printk(KERN_WARNING, ha,
-				"Abort failed while waiting -- %lx\n",
+				"Abort failed -- %lx\n",
 					sp->cmd->serial_number));
+			} else {
+				if (qla2x00_eh_wait_on_command(sp->cmd) !=
+					QLA_SUCCESS)
+					DEBUG2(qla_printk(KERN_WARNING, ha,
+					"Abort failed while waiting -- %lx\n",
+						sp->cmd->serial_number));
+			}
+			spin_lock_irqsave(&ha->hardware_lock, flags);
+		}
-		spin_lock_irqsave(&ha->hardware_lock, flags);
 	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
@@ -759,7 +693,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 	unsigned long flags;
 	int wait = 0;
 	struct qla_hw_data *ha = vha->hw;
-	struct req_que *req = vha->req;
+	struct req_que *req;
 	srb_t *spt;
 
 	qla2x00_block_error_handler(cmd);
@@ -775,6 +709,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 	spt = (srb_t *) CMD_SP(cmd);
 	if (!spt)
 		return SUCCESS;
+	req = spt->que;
 
 	/* Check active list for command command. */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -791,7 +726,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 		" pid=%ld.\n", __func__, vha->host_no, sp, serial));
 
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
-		if (ha->isp_ops->abort_command(sp)) {
+		if (ha->isp_ops->abort_command(vha, sp, req)) {
 			DEBUG2(printk("%s(%ld): abort_command "
 			"mbx failed.\n", __func__, vha->host_no));
 			ret = FAILED;
@@ -842,7 +777,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
 		return status;
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	req = vha->req;
+	req = sp->que;
 	for (cnt = 1; status == QLA_SUCCESS &&
 		cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
 		sp = req->outstanding_cmds[cnt];
@@ -885,7 +820,7 @@ static char *reset_errors[] = {
 
 static int
 __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
-    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
+    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int))
 {
 	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
@@ -906,8 +841,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
 	if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
 		goto eh_reset_failed;
 	err = 2;
-	if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
-		!= QLA_SUCCESS)
+	if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS)
 		goto eh_reset_failed;
 	err = 3;
 	if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
@@ -1062,9 +996,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
 		if (qla2x00_vp_abort_isp(vha))
 			goto eh_host_reset_lock;
 	} else {
-		if (ha->wq)
-			flush_workqueue(ha->wq);
-
 		set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
 		if (qla2x00_abort_isp(base_vha)) {
 			clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
@@ -1106,8 +1037,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
 	struct fc_port *fcport;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (ha->flags.enable_lip_full_login && !vha->vp_idx &&
-			!IS_QLA81XX(ha)) {
+	if (ha->flags.enable_lip_full_login && !vha->vp_idx) {
 		ret = qla2x00_full_login_lip(vha);
 		if (ret != QLA_SUCCESS) {
 			DEBUG2_3(printk("%s(%ld): failed: "
@@ -1134,7 +1064,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
 		if (fcport->port_type != FCT_TARGET)
 			continue;
 
-		ret = ha->isp_ops->target_reset(fcport, 0, 0);
+		ret = ha->isp_ops->target_reset(fcport, 0);
 		if (ret != QLA_SUCCESS) {
 			DEBUG2_3(printk("%s(%ld): bus_reset failed: "
 			    "target_reset=%d d_id=%x.\n", __func__,
@@ -1158,7 +1088,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 	struct req_que *req;
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	for (que = 0; que < ha->max_req_queues; que++) {
+	for (que = 0; que < ha->max_queues; que++) {
 		req = ha->req_q_map[que];
 		if (!req)
 			continue;
@@ -1193,7 +1123,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
 	scsi_qla_host_t *vha = shost_priv(sdev->host);
 	struct qla_hw_data *ha = vha->hw;
 	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
-	struct req_que *req = vha->req;
+	struct req_que *req = ha->req_q_map[vha->req_ques[0]];
 
 	if (sdev->tagged_supported)
 		scsi_activate_tcq(sdev, req->max_q_depth);
@@ -1581,13 +1511,6 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
 		break;
 	}
-
-	/* Get adapter physical port no from interrupt pin register. */
-	pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
-	if (ha->port_no & 1)
-		ha->flags.port0 = 1;
-	else
-		ha->flags.port0 = 0;
 }
 
 static int
@@ -1595,7 +1518,6 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
 {
 	resource_size_t pio;
 	uint16_t msix;
-	int cpus;
 
 	if (pci_request_selected_regions(ha->pdev, ha->bars,
 	    QLA2XXX_DRIVER_NAME)) {
@@ -1649,9 +1571,8 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
 	}
 
 	/* Determine queue resources */
-	ha->max_req_queues = ha->max_rsp_queues = 1;
-	if ((ql2xmaxqueues <= 1 || ql2xmultique_tag < 1) &&
-		(!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+	ha->max_queues = 1;
+	if (ql2xmaxqueues <= 1 || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
 		goto mqiobase_exit;
 	ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
 			pci_resource_len(ha->pdev, 3));
@@ -1661,24 +1582,18 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
 		ha->msix_count = msix;
 		/* Max queues are bounded by available msix vectors */
 		/* queue 0 uses two msix vectors */
-		if (ql2xmultique_tag) {
-			cpus = num_online_cpus();
-			ha->max_rsp_queues = (ha->msix_count - 1 - cpus) ?
-				(cpus + 1) : (ha->msix_count - 1);
-			ha->max_req_queues = 2;
-		} else if (ql2xmaxqueues > 1) {
-			ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
-						QLA_MQ_SIZE : ql2xmaxqueues;
-			DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
-			" of request queues:%d\n", ha->max_req_queues));
-		}
+		if (ha->msix_count - 1 < ql2xmaxqueues)
+			ha->max_queues = ha->msix_count - 1;
+		else if (ql2xmaxqueues > QLA_MQ_SIZE)
+			ha->max_queues = QLA_MQ_SIZE;
+		else
+			ha->max_queues = ql2xmaxqueues;
 		qla_printk(KERN_INFO, ha,
 			"MSI-X vector count: %d\n", msix);
-	} else
-		qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
+	}
 
mqiobase_exit:
-	ha->msix_count = ha->max_rsp_queues + 1;
+	ha->msix_count = ha->max_queues + 1;
 	return (0);
 
iospace_error_exit:
@@ -1690,9 +1605,6 @@ qla2xxx_scan_start(struct Scsi_Host *shost)
 {
 	scsi_qla_host_t *vha = shost_priv(shost);
 
-	if (vha->hw->flags.running_gold_fw)
-		return;
-
 	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
 	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
 	set_bit(RSCN_UPDATE, &vha->dpc_flags);
@@ -1856,7 +1768,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
 		ha->gid_list_info_size = 8;
 		ha->optrom_size = OPTROM_SIZE_81XX;
-		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
 		ha->isp_ops = &qla81xx_isp_ops;
 		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
 		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
@@ -1892,15 +1803,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		ret = -ENOMEM;
 		qla2x00_mem_free(ha);
-		qla2x00_free_req_que(ha, req);
-		qla2x00_free_rsp_que(ha, rsp);
+		qla2x00_free_que(ha, req, rsp);
 		goto probe_hw_failed;
 	}
 
 	pci_set_drvdata(pdev, base_vha);
 
 	host = base_vha->host;
-	base_vha->req = req;
+	base_vha->req_ques[0] = req->id;
 	host->can_queue = req->length + 128;
 	if (IS_QLA2XXX_MIDTYPE(ha))
 		base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
@@ -1931,10 +1841,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 	ha->rsp_q_map[0] = rsp;
 	ha->req_q_map[0] = req;
-	rsp->req = req;
-	req->rsp = rsp;
-	set_bit(0, ha->req_qid_map);
-	set_bit(0, ha->rsp_qid_map);
+
 	/* FWI2-capable only. */
 	req->req_q_in = &ha->iobase->isp24.req_q_in;
 	req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -1959,15 +1866,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto probe_failed;
 	}
 
-	if (ha->mqenable)
-		if (qla25xx_setup_mode(base_vha))
-			qla_printk(KERN_WARNING, ha,
-				"Can't create queues, falling back to single"
-				" queue mode\n");
-
-	if (ha->flags.running_gold_fw)
-		goto skip_dpc;
-
 	/*
 	 * Startup the kernel thread for this host adapter
 	 */
@@ -1980,7 +1878,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto probe_failed;
 	}
 
-skip_dpc:
 	list_add_tail(&base_vha->list, &ha->vp_list);
 	base_vha->host->irq = ha->pdev->irq;
@@ -2020,9 +1917,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	return 0;
 
probe_init_failed:
-	qla2x00_free_req_que(ha, req);
-	qla2x00_free_rsp_que(ha, rsp);
-	ha->max_req_queues = ha->max_rsp_queues = 0;
+	qla2x00_free_que(ha, req, rsp);
+	ha->max_queues = 0;
 
probe_failed:
 	if (base_vha->timer_active)
@@ -2080,13 +1976,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
 	base_vha->flags.online = 0;
 
-	/* Flush the work queue and remove it */
-	if (ha->wq) {
-		flush_workqueue(ha->wq);
-		destroy_workqueue(ha->wq);
-		ha->wq = NULL;
-	}
-
 	/* Kill the kernel thread for this host */
 	if (ha->dpc_thread) {
 		struct task_struct *t = ha->dpc_thread;
@@ -2128,8 +2017,6 @@ qla2x00_free_device(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
 
-	qla25xx_delete_queues(vha);
-
 	if (ha->flags.fce_enabled)
 		qla2x00_disable_fce_trace(vha, NULL, NULL);
@@ -2442,14 +2329,6 @@ qla2x00_mem_free(struct qla_hw_data *ha)
 		vfree(ha->fw_dump);
 	}
 
-	if (ha->dcbx_tlv)
-		dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
-		    ha->dcbx_tlv, ha->dcbx_tlv_dma);
-
-	if (ha->xgmac_data)
-		dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
-		    ha->xgmac_data, ha->xgmac_data_dma);
-
 	if (ha->sns_cmd)
 		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
 		    ha->sns_cmd, ha->sns_cmd_dma);
@@ -2533,8 +2412,6 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
 	INIT_LIST_HEAD(&vha->work_list);
 	INIT_LIST_HEAD(&vha->list);
 
-	spin_lock_init(&vha->work_lock);
-
 	sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
 	return vha;
@@ -2543,11 +2420,13 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
 }
 
 static struct qla_work_evt *
-qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
+qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type,
+    int locked)
 {
 	struct qla_work_evt *e;
 
-	e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
+	e = kzalloc(sizeof(struct qla_work_evt), locked ? GFP_ATOMIC:
+	    GFP_KERNEL);
 	if (!e)
 		return NULL;
@@ -2558,15 +2437,17 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
 }
 
 static int
-qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
+qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e, int locked)
 {
-	unsigned long flags;
+	unsigned long uninitialized_var(flags);
+	struct qla_hw_data *ha = vha->hw;
 
-	spin_lock_irqsave(&vha->work_lock, flags);
+	if (!locked)
+		spin_lock_irqsave(&ha->hardware_lock, flags);
 	list_add_tail(&e->list, &vha->work_list);
-	spin_unlock_irqrestore(&vha->work_lock, flags);
 	qla2xxx_wake_dpc(vha);
-
+	if (!locked)
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	return QLA_SUCCESS;
 }
@@ -2576,13 +2457,13 @@ qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
 {
 	struct qla_work_evt *e;
 
-	e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
+	e = qla2x00_alloc_work(vha, QLA_EVT_AEN, 1);
 	if (!e)
 		return QLA_FUNCTION_FAILED;
 
 	e->u.aen.code = code;
 	e->u.aen.data = data;
-	return qla2x00_post_work(vha, e);
+	return qla2x00_post_work(vha, e, 1);
 }
 
 int
@@ -2590,27 +2471,25 @@ qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
 {
 	struct qla_work_evt *e;
 
-	e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
+	e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK, 1);
 	if (!e)
 		return QLA_FUNCTION_FAILED;
 
 	memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
-	return qla2x00_post_work(vha, e);
+	return qla2x00_post_work(vha, e, 1);
 }
 
 static void
 qla2x00_do_work(struct scsi_qla_host *vha)
 {
-	struct qla_work_evt *e, *tmp;
-	unsigned long flags;
-	LIST_HEAD(work);
-
-	spin_lock_irqsave(&vha->work_lock, flags);
-	list_splice_init(&vha->work_list, &work);
-	spin_unlock_irqrestore(&vha->work_lock, flags);
+	struct qla_work_evt *e;
+	struct qla_hw_data *ha = vha->hw;
 
-	list_for_each_entry_safe(e, tmp, &work, list) {
+	spin_lock_irq(&ha->hardware_lock);
+	while (!list_empty(&vha->work_list)) {
+		e = list_entry(vha->work_list.next, struct qla_work_evt, list);
 		list_del_init(&e->list);
+		spin_unlock_irq(&ha->hardware_lock);
 
 		switch (e->type) {
 		case QLA_EVT_AEN:
@@ -2623,9 +2502,10 @@ qla2x00_do_work(struct scsi_qla_host *vha)
 		}
 		if (e->flags & QLA_EVT_FLAG_FREE)
 			kfree(e);
+		spin_lock_irq(&ha->hardware_lock);
 	}
+	spin_unlock_irq(&ha->hardware_lock);
 }
-
 /* Relogins all the fcports of a vport
 * Context: dpc thread */
 
diff --git a/trunk/drivers/scsi/qla2xxx/qla_sup.c b/trunk/drivers/scsi/qla2xxx/qla_sup.c
index 6260505dceb5..152ecfc26cd2 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_sup.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_sup.c
@@ -219,8 +219,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
 	wait_cnt = NVR_WAIT_CNT;
 	do {
 		if (!--wait_cnt) {
-			DEBUG9_10(qla_printk(KERN_WARNING, ha,
-			    "NVRAM didn't go ready...\n"));
+			DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n",
+			    __func__, vha->host_no));
 			break;
 		}
 		NVRAM_DELAY();
@@ -349,7 +349,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
 	wait_cnt = NVR_WAIT_CNT;
 	do {
 		if (!--wait_cnt) {
-			DEBUG9_10(qla_printk(KERN_WARNING, ha,
+			DEBUG9_10(qla_printk(
 			    "NVRAM didn't go ready...\n"));
 			break;
 		}
@@ -408,8 +408,7 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
 	wait_cnt = NVR_WAIT_CNT;
 	do {
 		if (!--wait_cnt) {
-			DEBUG9_10(qla_printk(KERN_WARNING, ha,
-			    "NVRAM didn't go ready...\n"));
+			DEBUG9_10(qla_printk("NVRAM didn't go ready...\n"));
 			break;
 		}
 		NVRAM_DELAY();
@@ -702,35 +701,32 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
 			break;
 		case FLT_REG_VPD_0:
 			ha->flt_region_vpd_nvram = start;
-			if (ha->flags.port0)
+			if (!(PCI_FUNC(ha->pdev->devfn) & 1))
 				ha->flt_region_vpd = start;
 			break;
 		case FLT_REG_VPD_1:
-			if (!ha->flags.port0)
+			if (PCI_FUNC(ha->pdev->devfn) & 1)
 				ha->flt_region_vpd = start;
 			break;
 		case FLT_REG_NVRAM_0:
-			if (ha->flags.port0)
+			if (!(PCI_FUNC(ha->pdev->devfn) & 1))
 				ha->flt_region_nvram = start;
 			break;
 		case FLT_REG_NVRAM_1:
-			if (!ha->flags.port0)
+			if (PCI_FUNC(ha->pdev->devfn) & 1)
 				ha->flt_region_nvram = start;
 			break;
 		case FLT_REG_FDT:
 			ha->flt_region_fdt = start;
 			break;
 		case FLT_REG_NPIV_CONF_0:
-			if (ha->flags.port0)
+			if (!(PCI_FUNC(ha->pdev->devfn) & 1))
 				ha->flt_region_npiv_conf = start;
 			break;
 		case FLT_REG_NPIV_CONF_1:
-			if (!ha->flags.port0)
+			if (PCI_FUNC(ha->pdev->devfn) & 1)
 				ha->flt_region_npiv_conf = start;
 			break;
-		case FLT_REG_GOLD_FW:
-			ha->flt_region_gold_fw = start;
-			break;
 		}
 	}
 	goto done;
@@ -748,12 +744,12 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
 	ha->flt_region_fw = def_fw[def];
 	ha->flt_region_boot = def_boot[def];
 	ha->flt_region_vpd_nvram = def_vpd_nvram[def];
-	ha->flt_region_vpd = ha->flags.port0 ?
+	ha->flt_region_vpd = !(PCI_FUNC(ha->pdev->devfn) & 1) ?
 	    def_vpd0[def]: def_vpd1[def];
-	ha->flt_region_nvram = ha->flags.port0 ?
+	ha->flt_region_nvram = !(PCI_FUNC(ha->pdev->devfn) & 1) ?
 	    def_nvram0[def]: def_nvram1[def];
 	ha->flt_region_fdt = def_fdt[def];
-	ha->flt_region_npiv_conf = ha->flags.port0 ?
+	ha->flt_region_npiv_conf = !(PCI_FUNC(ha->pdev->devfn) & 1) ?
 	    def_npiv_conf0[def]: def_npiv_conf1[def];
done:
 	DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
@@ -928,8 +924,6 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
 		struct fc_vport_identifiers vid;
 		struct fc_vport *vport;
 
-		memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
-
 		flags = le16_to_cpu(entry->flags);
 		if (flags == 0xffff)
 			continue;
@@ -943,7 +937,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
 		vid.port_name = wwn_to_u64(entry->port_name);
 		vid.node_name = wwn_to_u64(entry->node_name);
 
-		DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx "
+		memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
+
+		DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
 		    "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
 		    vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id),
 		    entry->q_qos, entry->f_qos));
@@ -959,6 +955,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
 	}
done:
 	kfree(data);
+	ha->npiv_info = NULL;
 }
 
 static int
@@ -1082,9 +1079,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
 			    0xff0000) | ((fdata >> 16) & 0xff));
 			ret = qla24xx_erase_sector(vha, fdata);
 			if (ret != QLA_SUCCESS) {
-				DEBUG9(qla_printk(KERN_WARNING, ha,
-				    "Unable to erase sector: address=%x.\n",
-				    faddr));
+				DEBUG9(qla_printk("Unable to erase sector: "
+				    "address=%x.\n", faddr));
 				break;
 			}
 		}
@@ -1244,9 +1240,8 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
 		ret = qla24xx_write_flash_dword(ha,
 		    nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
 		if (ret != QLA_SUCCESS) {
-			DEBUG9(qla_printk(KERN_WARNING, ha,
-			    "Unable to program nvram address=%x data=%x.\n",
-			    naddr, *dwptr));
+			DEBUG9(qla_printk("Unable to program nvram address=%x "
+			    "data=%x.\n", naddr, *dwptr));
 			break;
 		}
 	}
 
diff --git a/trunk/drivers/scsi/qla2xxx/qla_version.h b/trunk/drivers/scsi/qla2xxx/qla_version.h
index b63feaf43126..19d1afc3a343 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_version.h
+++ b/trunk/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.01-k3"
+#define QLA2XXX_VERSION      "8.03.01-k1"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	3
 
diff --git a/trunk/drivers/scsi/scsi.c b/trunk/drivers/scsi/scsi.c
index 2de5f3ad640b..166417a6afba 100644
--- a/trunk/drivers/scsi/scsi.c
+++ b/trunk/drivers/scsi/scsi.c
@@ -1225,8 +1225,8 @@ EXPORT_SYMBOL(__scsi_device_lookup_by_target);
 * @starget:	SCSI target pointer
 * @lun:	SCSI Logical Unit Number
 *
- * Description: Looks up the scsi_device with the specified @lun for a given
- * @starget. The returned scsi_device has an additional reference that
+ * Description: Looks up the scsi_device with the specified @channel, @id, @lun
+ * for a given host. The returned scsi_device has an additional reference that
 * needs to be released with scsi_device_put once you're done with it.
 **/
 struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
 
diff --git a/trunk/drivers/scsi/scsi_debug.c b/trunk/drivers/scsi/scsi_debug.c
index 41a21772df12..213123b0486b 100644
--- a/trunk/drivers/scsi/scsi_debug.c
+++ b/trunk/drivers/scsi/scsi_debug.c
@@ -887,7 +887,7 @@ static int resp_start_stop(struct scsi_cmnd * scp,
 static sector_t get_sdebug_capacity(void)
 {
 	if (scsi_debug_virtual_gb > 0)
-		return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb;
+		return 2048 * 1024 * scsi_debug_virtual_gb;
 	else
 		return sdebug_store_sectors;
 }
 
diff --git a/trunk/drivers/scsi/scsi_error.c b/trunk/drivers/scsi/scsi_error.c
index a1689353d7fd..0c2c73be1974 100644
--- a/trunk/drivers/scsi/scsi_error.c
+++ b/trunk/drivers/scsi/scsi_error.c
@@ -641,9 +641,9 @@ EXPORT_SYMBOL(scsi_eh_prep_cmnd);
 /**
 * scsi_eh_restore_cmnd  - Restore a scsi command info as part of error recory
 * @scmd:       SCSI command structure to restore
- * @ses:        saved information from a coresponding call to scsi_eh_prep_cmnd
+ * @ses:        saved information from a coresponding call to scsi_prep_eh_cmnd
 *
- * Undo any damage done by above scsi_eh_prep_cmnd().
+ * Undo any damage done by above scsi_prep_eh_cmnd().
 */
 void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
 {
@@ -1451,21 +1451,28 @@ static void eh_lock_door_done(struct request *req, int uptodate)
 * @sdev:	SCSI device to prevent medium removal
 *
 * Locking:
- * 	We must be called from process context.
+ * 	We must be called from process context; scsi_allocate_request()
+ * 	may sleep.
 *
 * Notes:
 * 	We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
 * 	head of the devices request queue, and continue.
+ *
+ * Bugs:
+ * 	scsi_allocate_request() may sleep waiting for existing requests to
+ * 	be processed. However, since we haven't kicked off any request
+ * 	processing for this host, this may deadlock.
+ *
+ * 	If scsi_allocate_request() fails for what ever reason, we
+ * 	completely forget to lock the door.
 */
 static void scsi_eh_lock_door(struct scsi_device *sdev)
 {
 	struct request *req;
 
-	/*
-	 * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
-	 * request becomes available
-	 */
 	req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
+	if (!req)
+		return;
 
 	req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
 	req->cmd[1] = 0;
 
diff --git a/trunk/drivers/scsi/scsi_lib.c b/trunk/drivers/scsi/scsi_lib.c
index 30f3275e119e..dd3f9d2b99fd 100644
--- a/trunk/drivers/scsi/scsi_lib.c
+++ b/trunk/drivers/scsi/scsi_lib.c
@@ -2412,18 +2412,20 @@ int
 scsi_internal_device_unblock(struct scsi_device *sdev)
 {
 	struct request_queue *q = sdev->request_queue;
+	int err;
 	unsigned long flags;
 
 	/*
 	 * Try to transition the scsi device to SDEV_RUNNING
 	 * and goose the device queue if successful.
 	 */
-	if (sdev->sdev_state == SDEV_BLOCK)
-		sdev->sdev_state = SDEV_RUNNING;
-	else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
-		sdev->sdev_state = SDEV_CREATED;
-	else
-		return -EINVAL;
+	err = scsi_device_set_state(sdev, SDEV_RUNNING);
+	if (err) {
+		err = scsi_device_set_state(sdev, SDEV_CREATED);
+
+		if (err)
+			return err;
+	}
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_start_queue(q);
 
diff --git a/trunk/drivers/scsi/scsi_scan.c b/trunk/drivers/scsi/scsi_scan.c
index c44783801402..e2b50d8f57a8 100644
--- a/trunk/drivers/scsi/scsi_scan.c
+++ b/trunk/drivers/scsi/scsi_scan.c
@@ -115,12 +115,12 @@ MODULE_PARM_DESC(max_report_luns,
 		 "REPORT LUNS maximum number of LUNS received (should be"
 		 " between 1 and 16384)");
 
-static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
+static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ+3;
 module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(inq_timeout, 
 		 "Timeout (in seconds) waiting for devices to answer INQUIRY."
-		 " Default is 20. Some devices may need more; most need less.");
+		 " Default is 5. Some non-compliant devices need more.");
 
 /* This lock protects only this list */
 static DEFINE_SPINLOCK(async_scan_lock);
 
diff --git a/trunk/drivers/scsi/scsi_transport_iscsi.c b/trunk/drivers/scsi/scsi_transport_iscsi.c
index f3e664628d7a..0a2ce7b6325c 100644
--- a/trunk/drivers/scsi/scsi_transport_iscsi.c
+++ b/trunk/drivers/scsi/scsi_transport_iscsi.c
@@ -37,6 +37,7 @@
 #define ISCSI_TRANSPORT_VERSION "2.0-870"
 
 struct iscsi_internal {
+	int daemon_pid;
 	struct scsi_transport_template t;
 	struct iscsi_transport *iscsi_transport;
 	struct list_head list;
@@ -937,9 +938,23 @@ iscsi_if_transport_lookup(struct iscsi_transport *tt)
 }
 
 static int
-iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
+iscsi_broadcast_skb(struct sk_buff *skb, gfp_t gfp)
 {
-	return nlmsg_multicast(nls, skb, 0, group, gfp);
+	return netlink_broadcast(nls, skb, 0, 1, gfp);
+}
+
+static int
+iscsi_unicast_skb(struct sk_buff *skb, int pid)
+{
+	int rc;
+
+	rc = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
+	if (rc < 0) {
+		printk(KERN_ERR "iscsi: can not unicast skb (%d)\n", rc);
+		return rc;
+	}
+
+	return 0;
 }
 
 int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
@@ -965,7 +980,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
 		return -ENOMEM;
 	}
 
-	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+	nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
 	ev = NLMSG_DATA(nlh);
 	memset(ev, 0, sizeof(*ev));
 	ev->transport_handle = iscsi_handle(conn->transport);
@@ -976,45 +991,10 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
 	memcpy(pdu, hdr, sizeof(struct iscsi_hdr));
 	memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size);
 
-	return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
+	return iscsi_unicast_skb(skb, priv->daemon_pid);
 }
 EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
 
-int iscsi_offload_mesg(struct Scsi_Host *shost,
-		       struct iscsi_transport *transport, uint32_t type,
-		       char *data, uint16_t data_size)
-{
-	struct nlmsghdr *nlh;
-	struct sk_buff *skb;
-	struct iscsi_uevent *ev;
-	int len = NLMSG_SPACE(sizeof(*ev) + data_size);
-
-	skb = alloc_skb(len, GFP_NOIO);
-	if (!skb) {
-		printk(KERN_ERR "can not deliver iscsi offload message:OOM\n");
-		return -ENOMEM;
-	}
-
-	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
-	ev = NLMSG_DATA(nlh);
-	memset(ev, 0, sizeof(*ev));
-	ev->type = type;
-	ev->transport_handle = iscsi_handle(transport);
-	switch (type) {
-	case ISCSI_KEVENT_PATH_REQ:
-		ev->r.req_path.host_no = shost->host_no;
-		break;
-	case ISCSI_KEVENT_IF_DOWN:
-		ev->r.notify_if_down.host_no = shost->host_no;
-		break;
-	}
-
-	memcpy((char *)ev + sizeof(*ev), data, data_size);
-
-	return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO);
-}
-EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
-
 void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
 {
 	struct nlmsghdr *nlh;
@@ -1034,7 +1014,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
 		return;
 	}
 
-	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+	nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
 	ev = NLMSG_DATA(nlh);
 	ev->transport_handle = iscsi_handle(conn->transport);
 	ev->type = ISCSI_KEVENT_CONN_ERROR;
@@ -1042,7 +1022,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
 	ev->r.connerror.cid = conn->cid;
 	ev->r.connerror.sid = iscsi_conn_get_sid(conn);
 
-	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
+	iscsi_broadcast_skb(skb, GFP_ATOMIC);
 
 	iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
 			      error);
@@ -1050,8 +1030,8 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
 EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
 
 static int
-iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
-		    void *payload, int size)
+iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
+		      void *payload, int size)
 {
 	struct sk_buff	*skb;
 	struct nlmsghdr	*nlh;
@@ -1065,10 +1045,10 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
 		return -ENOMEM;
 	}
 
-	nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
+	nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0);
 	nlh->nlmsg_flags = flags;
 	memcpy(NLMSG_DATA(nlh), payload, size);
-	return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
+	return iscsi_unicast_skb(skb, pid);
 }
 
 static int
@@ -1105,7 +1085,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
 			return -ENOMEM;
 		}
 
-		nlhstat = __nlmsg_put(skbstat, 0, 0, 0,
+		nlhstat = __nlmsg_put(skbstat, priv->daemon_pid, 0, 0,
 				      (len - sizeof(*nlhstat)), 0);
 		evstat = NLMSG_DATA(nlhstat);
 		memset(evstat, 0, sizeof(*evstat));
@@ -1129,8 +1109,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
 		skb_trim(skbstat, NLMSG_ALIGN(actual_size));
 		nlhstat->nlmsg_len = actual_size;
 
-		err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID,
-					  GFP_ATOMIC);
+		err = iscsi_unicast_skb(skbstat, priv->daemon_pid);
 	} while (err < 0 && err != -ECONNREFUSED);
 
 	return err;
@@ -1164,7 +1143,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
 		return -ENOMEM;
 	}
 
-	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+	nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
 	ev = NLMSG_DATA(nlh);
 	ev->transport_handle = iscsi_handle(session->transport);
@@ -1193,7 +1172,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
 	 * this will occur if the daemon is not up, so we just warn
 	 * the user and when the daemon is restarted it will handle it
 	 */
-	rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
+	rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
 	if (rc == -ESRCH)
 		iscsi_cls_session_printk(KERN_ERR, session,
 					 "Cannot notify userspace of session "
@@ -1289,54 +1268,26 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
 	return err;
 }
 
-static int iscsi_if_ep_connect(struct iscsi_transport *transport,
-			       struct iscsi_uevent *ev, int msg_type)
-{
-	struct iscsi_endpoint *ep;
-	struct sockaddr *dst_addr;
-	struct Scsi_Host *shost = NULL;
-	int non_blocking, err = 0;
-
-	if (!transport->ep_connect)
-		return -EINVAL;
-
-	if (msg_type == ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST) {
-		shost = scsi_host_lookup(ev->u.ep_connect_through_host.host_no);
-		if (!shost) {
-			printk(KERN_ERR "ep connect failed. Could not find "
-			       "host no %u\n",
-			       ev->u.ep_connect_through_host.host_no);
-			return -ENODEV;
-		}
-		non_blocking = ev->u.ep_connect_through_host.non_blocking;
-	} else
-		non_blocking = ev->u.ep_connect.non_blocking;
-
-	dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
-	ep = transport->ep_connect(shost, dst_addr, non_blocking);
-	if (IS_ERR(ep)) {
-		err = PTR_ERR(ep);
-		goto release_host;
-	}
-
-	ev->r.ep_connect_ret.handle = ep->id;
-release_host:
-	if (shost)
-		scsi_host_put(shost);
-	return err;
-}
-
 static int
 iscsi_if_transport_ep(struct iscsi_transport *transport,
 		      struct iscsi_uevent *ev, int msg_type)
 {
 	struct iscsi_endpoint *ep;
+	struct sockaddr *dst_addr;
 	int rc = 0;
 
 	switch (msg_type) {
-	case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
 	case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
-		rc = iscsi_if_ep_connect(transport, ev, msg_type);
+		if (!transport->ep_connect)
+			return -EINVAL;
+
+		dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
+		ep = transport->ep_connect(dst_addr,
+					   ev->u.ep_connect.non_blocking);
+		if (IS_ERR(ep))
+			return PTR_ERR(ep);
+
+		ev->r.ep_connect_ret.handle = ep->id;
 		break;
 	case ISCSI_UEVENT_TRANSPORT_EP_POLL:
 		if (!transport->ep_poll)
@@ -1414,31 +1365,7 @@ iscsi_set_host_param(struct iscsi_transport *transport,
 }
 
 static int
-iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
-{
-	struct Scsi_Host *shost;
-	struct iscsi_path *params;
-	int err;
-
-	if (!transport->set_path)
-		return -ENOSYS;
-
-	shost = scsi_host_lookup(ev->u.set_path.host_no);
-	if (!shost) {
-		printk(KERN_ERR "set path could not find host no %u\n",
-		       ev->u.set_path.host_no);
-		return -ENODEV;
-	}
-
-	params = (struct iscsi_path *)((char *)ev + sizeof(*ev));
-	err = transport->set_path(shost, params);
-
-	scsi_host_put(shost);
-	return err;
-}
-
-static int
-iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
+iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	int err = 0;
 	struct iscsi_uevent *ev = NLMSG_DATA(nlh);
@@ -1448,11 +1375,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	struct iscsi_cls_conn *conn;
 	struct iscsi_endpoint *ep = NULL;
 
-	if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE)
-		*group = ISCSI_NL_GRP_UIP;
-	else
-		*group = ISCSI_NL_GRP_ISCSID;
-
 	priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
 	if (!priv)
 		return -EINVAL;
@@ -1461,6 +1383,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (!try_module_get(transport->owner))
 		return -EINVAL;
 
+	priv->daemon_pid = NETLINK_CREDS(skb)->pid;
+
 	switch (nlh->nlmsg_type) {
 	case ISCSI_UEVENT_CREATE_SESSION:
 		err = iscsi_if_create_session(priv, ep, ev,
@@ -1545,7 +1469,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
 	case ISCSI_UEVENT_TRANSPORT_EP_POLL:
 	case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
-	case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
 		err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
 		break;
 	case ISCSI_UEVENT_TGT_DSCVR:
@@ -1554,9 +1477,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	case ISCSI_UEVENT_SET_HOST_PARAM:
 		err = iscsi_set_host_param(transport, ev);
 		break;
-	case ISCSI_UEVENT_PATH_UPDATE:
-		err = iscsi_set_path(transport, ev);
-		break;
 	default:
 		err = -ENOSYS;
 		break;
@@ -1579,7 +1499,6 @@ iscsi_if_rx(struct sk_buff *skb)
 	uint32_t rlen;
 	struct nlmsghdr	*nlh;
 	struct iscsi_uevent *ev;
-	uint32_t group;
 
 	nlh = nlmsg_hdr(skb);
 	if (nlh->nlmsg_len < sizeof(*nlh) ||
@@ -1592,7 +1511,7 @@ iscsi_if_rx(struct sk_buff *skb)
 	if (rlen > skb->len)
 		rlen = skb->len;
 
-	err = iscsi_if_recv_msg(skb, nlh, &group);
+	err = iscsi_if_recv_msg(skb, nlh);
 	if (err) {
 		ev->type = ISCSI_KEVENT_IF_ERROR;
 		ev->iferror = err;
@@ -1606,7 +1525,8 @@ iscsi_if_rx(struct sk_buff *skb)
 		 */
 		if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
 			break;
-		err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
+		err = iscsi_if_send_reply(
+			NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
 			nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
 	} while (err < 0 && err != -ECONNREFUSED);
 	skb_pull(skb, rlen);
@@ -1854,6 +1774,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
 	if (!priv)
 		return NULL;
 	INIT_LIST_HEAD(&priv->list);
+	priv->daemon_pid = -1;
 	priv->iscsi_transport = tt;
 	priv->t.user_scan = iscsi_user_scan;
 	priv->t.create_work_queue = 1;
 
diff --git a/trunk/drivers/scsi/sd.c b/trunk/drivers/scsi/sd.c
index 878b17a9af30..bcf3bd40bbd5 100644
--- a/trunk/drivers/scsi/sd.c
+++ b/trunk/drivers/scsi/sd.c
@@ -1902,6 +1902,24 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 	index = sdkp->index;
 	dev = &sdp->sdev_gendev;
 
+	if (!sdp->request_queue->rq_timeout) {
+		if (sdp->type != TYPE_MOD)
+			blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
+		else
+			blk_queue_rq_timeout(sdp->request_queue,
+					     SD_MOD_TIMEOUT);
+	}
+
+	device_initialize(&sdkp->dev);
+	sdkp->dev.parent = &sdp->sdev_gendev;
+	sdkp->dev.class = &sd_disk_class;
+	dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
+
+	if (device_add(&sdkp->dev))
+		goto out_free_index;
+
+	get_device(&sdp->sdev_gendev);
+
 	if (index < SD_MAX_DISKS) {
 		gd->major = sd_major((index & 0xf0) >> 4);
 		gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
@@ -1936,6 +1954,11 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 
 	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
 		  sdp->removable ? "removable " : "");
"removable " : ""); + + return; + + out_free_index: + ida_remove(&sd_index_ida, index); } /** @@ -2003,24 +2026,6 @@ static int sd_probe(struct device *dev) sdkp->openers = 0; sdkp->previous_state = 1; - if (!sdp->request_queue->rq_timeout) { - if (sdp->type != TYPE_MOD) - blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT); - else - blk_queue_rq_timeout(sdp->request_queue, - SD_MOD_TIMEOUT); - } - - device_initialize(&sdkp->dev); - sdkp->dev.parent = &sdp->sdev_gendev; - sdkp->dev.class = &sd_disk_class; - dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev)); - - if (device_add(&sdkp->dev)) - goto out_free_index; - - get_device(&sdp->sdev_gendev); - async_schedule(sd_probe_async, sdkp); return 0; @@ -2050,10 +2055,8 @@ static int sd_probe(struct device *dev) **/ static int sd_remove(struct device *dev) { - struct scsi_disk *sdkp; + struct scsi_disk *sdkp = dev_get_drvdata(dev); - async_synchronize_full(); - sdkp = dev_get_drvdata(dev); device_del(&sdkp->dev); del_gendisk(sdkp->disk); sd_shutdown(dev); diff --git a/trunk/drivers/scsi/st.c b/trunk/drivers/scsi/st.c index b33d04250bbc..89bd438e1fe3 100644 --- a/trunk/drivers/scsi/st.c +++ b/trunk/drivers/scsi/st.c @@ -2964,7 +2964,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon !(STp->use_pf & PF_TESTED)) { /* Try the other possible state of Page Format if not already tried */ - STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED; + STp->use_pf = !STp->use_pf | PF_TESTED; st_release_request(SRpnt); SRpnt = NULL; return st_int_ioctl(STp, cmd_in, arg); diff --git a/trunk/drivers/scsi/sym53c8xx_2/sym_glue.c b/trunk/drivers/scsi/sym53c8xx_2/sym_glue.c index 45374d66d26a..583966ec8266 100644 --- a/trunk/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/trunk/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -737,14 +737,11 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev) struct sym_hcb *np = sym_get_hcb(sdev->host); struct sym_tcb *tp = &np->target[sdev->id]; struct sym_lcb *lp; - unsigned long flags; - int error; if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN) return -ENXIO; - spin_lock_irqsave(np->s.host->host_lock, flags); - + tp->starget = sdev->sdev_target; /* * Fail the device init if the device is flagged NOSCAN at BOOT in * the NVRAM. 
This may speed up boot and maintain coherency with @@ -756,37 +753,26 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev) if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) { tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED; - starget_printk(KERN_INFO, sdev->sdev_target, + starget_printk(KERN_INFO, tp->starget, "Scan at boot disabled in NVRAM\n"); - error = -ENXIO; - goto out; + return -ENXIO; } if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) { - if (sdev->lun != 0) { - error = -ENXIO; - goto out; - } - starget_printk(KERN_INFO, sdev->sdev_target, + if (sdev->lun != 0) + return -ENXIO; + starget_printk(KERN_INFO, tp->starget, "Multiple LUNs disabled in NVRAM\n"); } lp = sym_alloc_lcb(np, sdev->id, sdev->lun); - if (!lp) { - error = -ENOMEM; - goto out; - } - if (tp->nlcb == 1) - tp->starget = sdev->sdev_target; + if (!lp) + return -ENOMEM; spi_min_period(tp->starget) = tp->usr_period; spi_max_width(tp->starget) = tp->usr_width; - error = 0; -out: - spin_unlock_irqrestore(np->s.host->host_lock, flags); - - return error; + return 0; } /* @@ -833,34 +819,12 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev) static void sym53c8xx_slave_destroy(struct scsi_device *sdev) { struct sym_hcb *np = sym_get_hcb(sdev->host); - struct sym_tcb *tp = &np->target[sdev->id]; - struct sym_lcb *lp = sym_lp(tp, sdev->lun); - unsigned long flags; - - spin_lock_irqsave(np->s.host->host_lock, flags); - - if (lp->busy_itlq || lp->busy_itl) { - /* - * This really shouldn't happen, but we can't return an error - * so let's try to stop all on-going I/O. - */ - starget_printk(KERN_WARNING, tp->starget, - "Removing busy LCB (%d)\n", sdev->lun); - sym_reset_scsi_bus(np, 1); - } + struct sym_lcb *lp = sym_lp(&np->target[sdev->id], sdev->lun); - if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) { - /* - * It was the last unit for this target. - */ - tp->head.sval = 0; - tp->head.wval = np->rv_scntl3; - tp->head.uval = 0; - tp->tgoal.check_nego = 1; - tp->starget = NULL; - } - - spin_unlock_irqrestore(np->s.host->host_lock, flags); + if (lp->itlq_tbl) + sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK * 4, "ITLQ_TBL"); + kfree(lp->cb_tags); + sym_mfree_dma(lp, sizeof(*lp), "LCB"); } /* @@ -926,8 +890,6 @@ static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc) if (!((uc->target >> t) & 1)) continue; tp = &np->target[t]; - if (!tp->nlcb) - continue; switch (uc->cmd) { diff --git a/trunk/drivers/scsi/sym53c8xx_2/sym_hipd.c b/trunk/drivers/scsi/sym53c8xx_2/sym_hipd.c index 69ad4945c936..ffa70d1ed182 100644 --- a/trunk/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/trunk/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -1896,15 +1896,6 @@ void sym_start_up(struct Scsi_Host *shost, int reason) tp->head.sval = 0; tp->head.wval = np->rv_scntl3; tp->head.uval = 0; - if (tp->lun0p) - tp->lun0p->to_clear = 0; - if (tp->lunmp) { - int ln; - - for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++) - if (tp->lunmp[ln]) - tp->lunmp[ln]->to_clear = 0; - } } /* @@ -4997,7 +4988,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln) */ if (ln && !tp->lunmp) { tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *), - GFP_ATOMIC); + GFP_KERNEL); if (!tp->lunmp) goto fail; } @@ -5017,7 +5008,6 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln) tp->lun0p = lp; tp->head.lun0_sa = cpu_to_scr(vtobus(lp)); } - tp->nlcb++; /* * Let the itl task point to error handling. 
@@ -5094,43 +5084,6 @@ static void sym_alloc_lcb_tags (struct sym_hcb *np, u_char tn, u_char ln) return; } -/* - * Lun control block deallocation. Returns the number of valid remaing LCBs - * for the target. - */ -int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln) -{ - struct sym_tcb *tp = &np->target[tn]; - struct sym_lcb *lp = sym_lp(tp, ln); - - tp->nlcb--; - - if (ln) { - if (!tp->nlcb) { - kfree(tp->lunmp); - sym_mfree_dma(tp->luntbl, 256, "LUNTBL"); - tp->lunmp = NULL; - tp->luntbl = NULL; - tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl)); - } else { - tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa)); - tp->lunmp[ln] = NULL; - } - } else { - tp->lun0p = NULL; - tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa)); - } - - if (lp->itlq_tbl) { - sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK*4, "ITLQ_TBL"); - kfree(lp->cb_tags); - } - - sym_mfree_dma(lp, sizeof(*lp), "LCB"); - - return tp->nlcb; -} - /* * Queue a SCSI IO to the controller. */ diff --git a/trunk/drivers/scsi/sym53c8xx_2/sym_hipd.h b/trunk/drivers/scsi/sym53c8xx_2/sym_hipd.h index 053e63c86822..9ebc8706b6bf 100644 --- a/trunk/drivers/scsi/sym53c8xx_2/sym_hipd.h +++ b/trunk/drivers/scsi/sym53c8xx_2/sym_hipd.h @@ -401,7 +401,6 @@ struct sym_tcb { * An array of bus addresses is used on reselection. */ u32 *luntbl; /* LCBs bus address table */ - int nlcb; /* Number of valid LCBs (including LUN #0) */ /* * LUN table used by the C code. @@ -1066,7 +1065,6 @@ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order); void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp); struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln); -int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln); int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp); int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out); int sym_reset_scsi_target(struct sym_hcb *np, int target); diff --git a/trunk/drivers/video/aty/aty128fb.c b/trunk/drivers/video/aty/aty128fb.c index e4e4d433b007..35e8eb02b9e9 100644 --- a/trunk/drivers/video/aty/aty128fb.c +++ b/trunk/drivers/video/aty/aty128fb.c @@ -354,7 +354,7 @@ static int default_crt_on __devinitdata = 0; static int default_lcd_on __devinitdata = 1; #ifdef CONFIG_MTRR -static bool mtrr = true; +static int mtrr = 1; #endif #ifdef CONFIG_PMAC_BACKLIGHT diff --git a/trunk/drivers/video/cyber2000fb.c b/trunk/drivers/video/cyber2000fb.c index da7c01b39be2..83c5cefc266c 100644 --- a/trunk/drivers/video/cyber2000fb.c +++ b/trunk/drivers/video/cyber2000fb.c @@ -1736,8 +1736,10 @@ static int __init cyber2000fb_init(void) #ifdef CONFIG_ARCH_SHARK err = cyberpro_vl_probe(); - if (!err) + if (!err) { ret = 0; + __module_get(THIS_MODULE); + } #endif #ifdef CONFIG_PCI err = pci_register_driver(&cyberpro_driver); @@ -1747,15 +1749,14 @@ static int __init cyber2000fb_init(void) return ret ? 
-module_init(cyber2000fb_init);
 
-#ifndef CONFIG_ARCH_SHARK
 static void __exit cyberpro_exit(void)
 {
 	pci_unregister_driver(&cyberpro_driver);
 }
+
+module_init(cyber2000fb_init);
 module_exit(cyberpro_exit);
-#endif
 
 MODULE_AUTHOR("Russell King");
 MODULE_DESCRIPTION("CyberPro 2000, 2010 and 5000 framebuffer driver");
 
diff --git a/trunk/drivers/video/uvesafb.c b/trunk/drivers/video/uvesafb.c
index ca5b4643a401..421770b5e6ab 100644
--- a/trunk/drivers/video/uvesafb.c
+++ b/trunk/drivers/video/uvesafb.c
@@ -45,7 +45,7 @@ static struct fb_fix_screeninfo uvesafb_fix __devinitdata = {
 static int mtrr		__devinitdata = 3; /* enable mtrr by default */
 static int blank	= 1;		   /* enable blanking by default */
 static int ypan		= 1;		   /* 0: scroll, 1: ypan, 2: ywrap */
-static bool pmi_setpal	__devinitdata = true; /* use PMI for palette changes */
+static int pmi_setpal	__devinitdata = 1; /* use PMI for palette changes */
 static int nocrtc	__devinitdata; /* ignore CRTC settings */
 static int noedid	__devinitdata; /* don't try DDC transfers */
 static int vram_remap	__devinitdata; /* set amt. of memory to be used */
@@ -2002,7 +2002,11 @@ static void __devexit uvesafb_exit(void)
 
 module_exit(uvesafb_exit);
 
-#define param_get_scroll NULL
+static int param_get_scroll(char *buffer, struct kernel_param *kp)
+{
+	return 0;
+}
+
 static int param_set_scroll(const char *val, struct kernel_param *kp)
 {
 	ypan = 0;
@@ -2013,8 +2017,6 @@ static int param_set_scroll(const char *val, struct kernel_param *kp)
 		ypan = 1;
 	else if (!strcmp(val, "ywrap"))
 		ypan = 2;
-	else
-		return -EINVAL;
 
 	return 0;
 }
 
diff --git a/trunk/drivers/virtio/virtio.c b/trunk/drivers/virtio/virtio.c
index 3a43ebf83a49..018c070a357f 100644
--- a/trunk/drivers/virtio/virtio.c
+++ b/trunk/drivers/virtio/virtio.c
@@ -31,37 +31,21 @@ static ssize_t modalias_show(struct device *_d,
 	return sprintf(buf, "virtio:d%08Xv%08X\n",
 		       dev->id.device, dev->id.vendor);
 }
-static ssize_t features_show(struct device *_d,
-			     struct device_attribute *attr, char *buf)
-{
-	struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
-	unsigned int i;
-	ssize_t len = 0;
-
-	/* We actually represent this as a bitstring, as it could be
-	 * arbitrary length in future. */
-	for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++)
-		len += sprintf(buf+len, "%c",
-			       test_bit(i, dev->features) ? '1' : '0');
-	len += sprintf(buf+len, "\n");
-	return len;
-}
 static struct device_attribute virtio_dev_attrs[] = {
 	__ATTR_RO(device),
 	__ATTR_RO(vendor),
 	__ATTR_RO(status),
 	__ATTR_RO(modalias),
-	__ATTR_RO(features),
 	__ATTR_NULL
 };
 
 static inline int virtio_id_match(const struct virtio_device *dev,
 				  const struct virtio_device_id *id)
 {
-	if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID)
+	if (id->device != dev->id.device)
 		return 0;
 
-	return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor;
+	return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor != dev->id.vendor;
 }
 
 /* This looks through all the IDs a driver claims to support.  If any of them
@@ -134,14 +118,13 @@ static int virtio_dev_probe(struct device *_d)
 		if (device_features & (1 << i))
 			set_bit(i, dev->features);
 
-	dev->config->finalize_features(dev);
-
 	err = drv->probe(dev);
 	if (err)
 		add_status(dev, VIRTIO_CONFIG_S_FAILED);
-	else
+	else {
+		dev->config->finalize_features(dev);
 		add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
-
+	}
 	return err;
 }
 
@@ -202,8 +185,6 @@ int register_virtio_device(struct virtio_device *dev)
 	/* Acknowledge that we've seen the device. */
 	add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
 
-	INIT_LIST_HEAD(&dev->vqs);
-
 	/* device_register() causes the bus infrastructure to look for a
 	 * matching driver. */
 	err = device_register(&dev->dev);
 
diff --git a/trunk/drivers/virtio/virtio_balloon.c b/trunk/drivers/virtio/virtio_balloon.c
index 26b278264796..9c76a061a04d 100644
--- a/trunk/drivers/virtio/virtio_balloon.c
+++ b/trunk/drivers/virtio/virtio_balloon.c
@@ -204,9 +204,6 @@ static int balloon(void *_vballoon)
 static int virtballoon_probe(struct virtio_device *vdev)
 {
 	struct virtio_balloon *vb;
-	struct virtqueue *vqs[2];
-	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack };
-	const char *names[] = { "inflate", "deflate" };
 	int err;
 
 	vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
@@ -221,17 +218,22 @@ static int virtballoon_probe(struct virtio_device *vdev)
 	vb->vdev = vdev;
 
 	/* We expect two virtqueues. */
-	err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
-	if (err)
+	vb->inflate_vq = vdev->config->find_vq(vdev, 0, balloon_ack);
+	if (IS_ERR(vb->inflate_vq)) {
+		err = PTR_ERR(vb->inflate_vq);
 		goto out_free_vb;
+	}
 
-	vb->inflate_vq = vqs[0];
-	vb->deflate_vq = vqs[1];
+	vb->deflate_vq = vdev->config->find_vq(vdev, 1, balloon_ack);
+	if (IS_ERR(vb->deflate_vq)) {
+		err = PTR_ERR(vb->deflate_vq);
+		goto out_del_inflate_vq;
+	}
 
 	vb->thread = kthread_run(balloon, vb, "vballoon");
 	if (IS_ERR(vb->thread)) {
 		err = PTR_ERR(vb->thread);
-		goto out_del_vqs;
+		goto out_del_deflate_vq;
 	}
 
 	vb->tell_host_first
@@ -239,8 +241,10 @@ static int virtballoon_probe(struct virtio_device *vdev)
 
 	return 0;
 
-out_del_vqs:
-	vdev->config->del_vqs(vdev);
+out_del_deflate_vq:
+	vdev->config->del_vq(vb->deflate_vq);
+out_del_inflate_vq:
+	vdev->config->del_vq(vb->inflate_vq);
 out_free_vb:
 	kfree(vb);
 out:
@@ -260,7 +264,8 @@ static void virtballoon_remove(struct virtio_device *vdev)
 	/* Now we reset the device so we can clean up the queues. */
 	vdev->config->reset(vdev);
 
-	vdev->config->del_vqs(vdev);
+	vdev->config->del_vq(vb->deflate_vq);
+	vdev->config->del_vq(vb->inflate_vq);
 	kfree(vb);
 }
 
diff --git a/trunk/drivers/virtio/virtio_pci.c b/trunk/drivers/virtio/virtio_pci.c
index 193c8f0e5cc5..330aacbdec1f 100644
--- a/trunk/drivers/virtio/virtio_pci.c
+++ b/trunk/drivers/virtio/virtio_pci.c
@@ -42,26 +42,6 @@ struct virtio_pci_device
 	/* a list of queues so we can dispatch IRQs */
 	spinlock_t lock;
 	struct list_head virtqueues;
-
-	/* MSI-X support */
-	int msix_enabled;
-	int intx_enabled;
-	struct msix_entry *msix_entries;
-	/* Name strings for interrupts. This size should be enough,
-	 * and I'm too lazy to allocate each name separately. */
-	char (*msix_names)[256];
-	/* Number of available vectors */
-	unsigned msix_vectors;
-	/* Vectors allocated */
-	unsigned msix_used_vectors;
-};
-
-/* Constants for MSI-X */
-/* Use first vector for configuration changes, second and the rest for
- * virtqueues Thus, we need at least 2 vectors for MSI. */
-enum {
-	VP_MSIX_CONFIG_VECTOR = 0,
-	VP_MSIX_VQ_VECTOR = 1,
 };
 
 struct virtio_pci_vq_info
@@ -80,9 +60,6 @@ struct virtio_pci_vq_info
 
 	/* the list node for the virtqueues list */
 	struct list_head node;
-
-	/* MSI-X vector (or none) */
-	unsigned vector;
 };
 
 /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
@@ -132,8 +109,7 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
 		   void *buf, unsigned len)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	void __iomem *ioaddr = vp_dev->ioaddr +
-				VIRTIO_PCI_CONFIG(vp_dev) + offset;
+	void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset;
 	u8 *ptr = buf;
 	int i;
 
@@ -147,8 +123,7 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
 		   const void *buf, unsigned len)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	void __iomem *ioaddr = vp_dev->ioaddr +
-				VIRTIO_PCI_CONFIG(vp_dev) + offset;
+	void __iomem *ioaddr = vp_dev->ioaddr + VIRTIO_PCI_CONFIG + offset;
 	const u8 *ptr = buf;
 	int i;
 
@@ -189,37 +164,6 @@ static void vp_notify(struct virtqueue *vq)
 	iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
 }
 
-/* Handle a configuration change: Tell driver if it wants to know. */
-static irqreturn_t vp_config_changed(int irq, void *opaque)
-{
-	struct virtio_pci_device *vp_dev = opaque;
-	struct virtio_driver *drv;
-	drv = container_of(vp_dev->vdev.dev.driver,
-			   struct virtio_driver, driver);
-
-	if (drv && drv->config_changed)
-		drv->config_changed(&vp_dev->vdev);
-	return IRQ_HANDLED;
-}
-
-/* Notify all virtqueues on an interrupt. */
-static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
-{
-	struct virtio_pci_device *vp_dev = opaque;
-	struct virtio_pci_vq_info *info;
-	irqreturn_t ret = IRQ_NONE;
-	unsigned long flags;
-
-	spin_lock_irqsave(&vp_dev->lock, flags);
-	list_for_each_entry(info, &vp_dev->virtqueues, node) {
-		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
-			ret = IRQ_HANDLED;
-	}
-	spin_unlock_irqrestore(&vp_dev->lock, flags);
-
-	return ret;
-}
-
 /* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
@@ -229,6 +173,9 @@ static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
 static irqreturn_t vp_interrupt(int irq, void *opaque)
 {
 	struct virtio_pci_device *vp_dev = opaque;
+	struct virtio_pci_vq_info *info;
+	irqreturn_t ret = IRQ_NONE;
+	unsigned long flags;
 	u8 isr;
 
 	/* reading the ISR has the effect of also clearing it so it's very
@@ -240,137 +187,34 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
 		return IRQ_NONE;
 
 	/* Configuration change?  Tell driver if it wants to know. */
-	if (isr & VIRTIO_PCI_ISR_CONFIG)
-		vp_config_changed(irq, opaque);
+	if (isr & VIRTIO_PCI_ISR_CONFIG) {
+		struct virtio_driver *drv;
+		drv = container_of(vp_dev->vdev.dev.driver,
+				   struct virtio_driver, driver);
 
-	return vp_vring_interrupt(irq, opaque);
-}
-
-static void vp_free_vectors(struct virtio_device *vdev)
-{
-	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	int i;
-
-	if (vp_dev->intx_enabled) {
-		free_irq(vp_dev->pci_dev->irq, vp_dev);
-		vp_dev->intx_enabled = 0;
-	}
-
-	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
-		free_irq(vp_dev->msix_entries[i].vector, vp_dev);
-	vp_dev->msix_used_vectors = 0;
-
-	if (vp_dev->msix_enabled) {
-		/* Disable the vector used for configuration */
-		iowrite16(VIRTIO_MSI_NO_VECTOR,
-			  vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
-		/* Flush the write out to device */
-		ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
-
-		vp_dev->msix_enabled = 0;
-		pci_disable_msix(vp_dev->pci_dev);
+		if (drv && drv->config_changed)
+			drv->config_changed(&vp_dev->vdev);
 	}
-}
-
-static int vp_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
-			  int *options, int noptions)
-{
-	int i;
-	for (i = 0; i < noptions; ++i)
-		if (!pci_enable_msix(dev, entries, options[i]))
-			return options[i];
-	return -EBUSY;
-}
 
-static int vp_request_vectors(struct virtio_device *vdev, unsigned max_vqs)
-{
-	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
-	const char *name = dev_name(&vp_dev->vdev.dev);
-	unsigned i, v;
-	int err = -ENOMEM;
-	/* We want at most one vector per queue and one for config changes.
-	 * Fallback to separate vectors for config and a shared for queues.
-	 * Finally fall back to regular interrupts. */
-	int options[] = { max_vqs + 1, 2 };
-	int nvectors = max(options[0], options[1]);
-
-	vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
-				       GFP_KERNEL);
-	if (!vp_dev->msix_entries)
-		goto error_entries;
-	vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
-				     GFP_KERNEL);
-	if (!vp_dev->msix_names)
-		goto error_names;
-
-	for (i = 0; i < nvectors; ++i)
-		vp_dev->msix_entries[i].entry = i;
-
-	err = vp_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries,
-			     options, ARRAY_SIZE(options));
-	if (err < 0) {
-		/* Can't allocate enough MSI-X vectors, use regular interrupt */
-		vp_dev->msix_vectors = 0;
-		err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
-				  IRQF_SHARED, name, vp_dev);
-		if (err)
-			goto error_irq;
-		vp_dev->intx_enabled = 1;
-	} else {
-		vp_dev->msix_vectors = err;
-		vp_dev->msix_enabled = 1;
-
-		/* Set the vector used for configuration */
-		v = vp_dev->msix_used_vectors;
-		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
-			 "%s-config", name);
-		err = request_irq(vp_dev->msix_entries[v].vector,
-				  vp_config_changed, 0, vp_dev->msix_names[v],
-				  vp_dev);
-		if (err)
-			goto error_irq;
-		++vp_dev->msix_used_vectors;
-
-		iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
-		/* Verify we had enough resources to assign the vector */
-		v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
-		if (v == VIRTIO_MSI_NO_VECTOR) {
-			err = -EBUSY;
-			goto error_irq;
-		}
+	spin_lock_irqsave(&vp_dev->lock, flags);
+	list_for_each_entry(info, &vp_dev->virtqueues, node) {
+		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
+			ret = IRQ_HANDLED;
 	}
+	spin_unlock_irqrestore(&vp_dev->lock, flags);
 
-	if (vp_dev->msix_vectors && vp_dev->msix_vectors != max_vqs + 1) {
-		/* Shared vector for all VQs */
-		v = vp_dev->msix_used_vectors;
-		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
-			 "%s-virtqueues", name);
-		err = request_irq(vp_dev->msix_entries[v].vector,
-				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
-				  vp_dev);
-		if (err)
-			goto error_irq;
-		++vp_dev->msix_used_vectors;
-	}
-	return 0;
-error_irq:
-	vp_free_vectors(vdev);
-	kfree(vp_dev->msix_names);
-error_names:
-	kfree(vp_dev->msix_entries);
-error_entries:
-	return err;
+	return ret;
 }
 
+/* the config->find_vq() implementation */
 static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
-				    void (*callback)(struct virtqueue *vq),
-				    const char *name)
+				    void (*callback)(struct virtqueue *vq))
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
 	struct virtio_pci_vq_info *info;
 	struct virtqueue *vq;
 	unsigned long flags, size;
-	u16 num, vector;
+	u16 num;
 	int err;
 
 	/* Select the queue we're interested in */
@@ -389,7 +233,6 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
 
 	info->queue_index = index;
 	info->num = num;
-	info->vector = VIRTIO_MSI_NO_VECTOR;
 
 	size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
 	info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
@@ -404,7 +247,7 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
 
 	/* create the vring */
 	vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN,
-				 vdev, info->queue, vp_notify, callback, name);
+				 vdev, info->queue, vp_notify, callback);
 	if (!vq) {
 		err = -ENOMEM;
 		goto out_activate_queue;
@@ -413,43 +256,12 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
 	vq->priv = info;
 	info->vq = vq;
 
-	/* allocate per-vq vector if available and necessary */
-	if (callback && vp_dev->msix_used_vectors < vp_dev->msix_vectors) {
-		vector = vp_dev->msix_used_vectors;
-		snprintf(vp_dev->msix_names[vector], sizeof *vp_dev->msix_names,
-			 "%s-%s", dev_name(&vp_dev->vdev.dev), name);
-		err = request_irq(vp_dev->msix_entries[vector].vector,
-				  vring_interrupt, 0,
-				  vp_dev->msix_names[vector], vq);
-		if (err)
-			goto out_request_irq;
-		info->vector = vector;
-		++vp_dev->msix_used_vectors;
-	} else
-		vector = VP_MSIX_VQ_VECTOR;
-
-	if (callback && vp_dev->msix_enabled) {
-		iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
-		vector = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
-		if (vector == VIRTIO_MSI_NO_VECTOR) {
-			err = -EBUSY;
-			goto out_assign;
-		}
-	}
-
 	spin_lock_irqsave(&vp_dev->lock, flags);
 	list_add(&info->node, &vp_dev->virtqueues);
 	spin_unlock_irqrestore(&vp_dev->lock, flags);
 
 	return vq;
 
-out_assign:
-	if (info->vector != VIRTIO_MSI_NO_VECTOR) {
-		free_irq(vp_dev->msix_entries[info->vector].vector, vq);
-		--vp_dev->msix_used_vectors;
-	}
-out_request_irq:
-	vring_del_virtqueue(vq);
 out_activate_queue:
 	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
 	free_pages_exact(info->queue, size);
@@ -458,27 +270,21 @@ static struct virtqueue *vp_find_vq(struct virtio_device *vdev, unsigned index,
 	return ERR_PTR(err);
 }
 
+/* the config->del_vq() implementation */
 static void vp_del_vq(struct virtqueue *vq)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
 	struct virtio_pci_vq_info *info = vq->priv;
-	unsigned long size;
-
-	iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
-
-	if (info->vector != VIRTIO_MSI_NO_VECTOR)
-		free_irq(vp_dev->msix_entries[info->vector].vector, vq);
+	unsigned long flags, size;
 
-	if (vp_dev->msix_enabled) {
-		iowrite16(VIRTIO_MSI_NO_VECTOR,
-			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
-		/* Flush the write out to device */
-		ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
-	}
+
spin_lock_irqsave(&vp_dev->lock, flags); + list_del(&info->node); + spin_unlock_irqrestore(&vp_dev->lock, flags); vring_del_virtqueue(vq); /* Select and deactivate the queue */ + iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); @@ -486,57 +292,14 @@ static void vp_del_vq(struct virtqueue *vq) kfree(info); } -/* the config->del_vqs() implementation */ -static void vp_del_vqs(struct virtio_device *vdev) -{ - struct virtqueue *vq, *n; - - list_for_each_entry_safe(vq, n, &vdev->vqs, list) - vp_del_vq(vq); - - vp_free_vectors(vdev); -} - -/* the config->find_vqs() implementation */ -static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char *names[]) -{ - int vectors = 0; - int i, err; - - /* How many vectors would we like? */ - for (i = 0; i < nvqs; ++i) - if (callbacks[i]) - ++vectors; - - err = vp_request_vectors(vdev, vectors); - if (err) - goto error_request; - - for (i = 0; i < nvqs; ++i) { - vqs[i] = vp_find_vq(vdev, i, callbacks[i], names[i]); - if (IS_ERR(vqs[i])) - goto error_find; - } - return 0; - -error_find: - vp_del_vqs(vdev); - -error_request: - return PTR_ERR(vqs[i]); -} - static struct virtio_config_ops virtio_pci_config_ops = { .get = vp_get, .set = vp_set, .get_status = vp_get_status, .set_status = vp_set_status, .reset = vp_reset, - .find_vqs = vp_find_vqs, - .del_vqs = vp_del_vqs, + .find_vq = vp_find_vq, + .del_vq = vp_del_vq, .get_features = vp_get_features, .finalize_features = vp_finalize_features, }; @@ -547,7 +310,7 @@ static void virtio_pci_release_dev(struct device *_d) struct virtio_pci_device *vp_dev = to_vp_device(dev); struct pci_dev *pci_dev = vp_dev->pci_dev; - vp_del_vqs(dev); + free_irq(pci_dev->irq, vp_dev); pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); pci_release_regions(pci_dev); @@ -606,13 +369,21 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev, vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; vp_dev->vdev.id.device = pci_dev->subsystem_device; + /* register a handler for the queue with the PCI device's interrupt */ + err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED, + dev_name(&vp_dev->vdev.dev), vp_dev); + if (err) + goto out_set_drvdata; + /* finally register the virtio device */ err = register_virtio_device(&vp_dev->vdev); if (err) - goto out_set_drvdata; + goto out_req_irq; return 0; +out_req_irq: + free_irq(pci_dev->irq, vp_dev); out_set_drvdata: pci_set_drvdata(pci_dev, NULL); pci_iounmap(pci_dev, vp_dev->ioaddr); diff --git a/trunk/drivers/virtio/virtio_ring.c b/trunk/drivers/virtio/virtio_ring.c index a882f2606515..5c52369ab9bb 100644 --- a/trunk/drivers/virtio/virtio_ring.c +++ b/trunk/drivers/virtio/virtio_ring.c @@ -23,30 +23,21 @@ #ifdef DEBUG /* For development, we want to crash whenever the ring is screwed. */ -#define BAD_RING(_vq, fmt, args...) \ - do { \ - dev_err(&(_vq)->vq.vdev->dev, \ - "%s:"fmt, (_vq)->vq.name, ##args); \ - BUG(); \ - } while (0) +#define BAD_RING(_vq, fmt...) \ + do { dev_err(&(_vq)->vq.vdev->dev, fmt); BUG(); } while(0) /* Caller is supposed to guarantee no reentry. 
*/ #define START_USE(_vq) \ do { \ if ((_vq)->in_use) \ - panic("%s:in_use = %i\n", \ - (_vq)->vq.name, (_vq)->in_use); \ + panic("in_use = %i\n", (_vq)->in_use); \ (_vq)->in_use = __LINE__; \ mb(); \ - } while (0) + } while(0) #define END_USE(_vq) \ do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; mb(); } while(0) #else -#define BAD_RING(_vq, fmt, args...) \ - do { \ - dev_err(&_vq->vq.vdev->dev, \ - "%s:"fmt, (_vq)->vq.name, ##args); \ - (_vq)->broken = true; \ - } while (0) +#define BAD_RING(_vq, fmt...) \ + do { dev_err(&_vq->vq.vdev->dev, fmt); (_vq)->broken = true; } while(0) #define START_USE(vq) #define END_USE(vq) #endif @@ -61,9 +52,6 @@ struct vring_virtqueue /* Other side has made a mess, don't try any more. */ bool broken; - /* Host supports indirect buffers */ - bool indirect; - /* Number of free buffers */ unsigned int num_free; /* Head of free buffer list. */ @@ -88,55 +76,6 @@ struct vring_virtqueue #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) -/* Set up an indirect table of descriptors and add it to the queue. */ -static int vring_add_indirect(struct vring_virtqueue *vq, - struct scatterlist sg[], - unsigned int out, - unsigned int in) -{ - struct vring_desc *desc; - unsigned head; - int i; - - desc = kmalloc((out + in) * sizeof(struct vring_desc), GFP_ATOMIC); - if (!desc) - return vq->vring.num; - - /* Transfer entries from the sg list into the indirect page */ - for (i = 0; i < out; i++) { - desc[i].flags = VRING_DESC_F_NEXT; - desc[i].addr = sg_phys(sg); - desc[i].len = sg->length; - desc[i].next = i+1; - sg++; - } - for (; i < (out + in); i++) { - desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; - desc[i].addr = sg_phys(sg); - desc[i].len = sg->length; - desc[i].next = i+1; - sg++; - } - - /* Last one doesn't continue. */ - desc[i-1].flags &= ~VRING_DESC_F_NEXT; - desc[i-1].next = 0; - - /* We're about to use a buffer */ - vq->num_free--; - - /* Use a single buffer which doesn't continue */ - head = vq->free_head; - vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; - vq->vring.desc[head].addr = virt_to_phys(desc); - vq->vring.desc[head].len = i * sizeof(struct vring_desc); - - /* Update free pointer */ - vq->free_head = vq->vring.desc[head].next; - - return head; -} - static int vring_add_buf(struct virtqueue *_vq, struct scatterlist sg[], unsigned int out, @@ -146,21 +85,12 @@ static int vring_add_buf(struct virtqueue *_vq, struct vring_virtqueue *vq = to_vvq(_vq); unsigned int i, avail, head, uninitialized_var(prev); - START_USE(vq); - BUG_ON(data == NULL); - - /* If the host supports indirect descriptor tables, and we have multiple - * buffers, then go indirect. FIXME: tune this threshold */ - if (vq->indirect && (out + in) > 1 && vq->num_free) { - head = vring_add_indirect(vq, sg, out, in); - if (head != vq->vring.num) - goto add_head; - } - BUG_ON(out + in > vq->vring.num); BUG_ON(out + in == 0); + START_USE(vq); + if (vq->num_free < out + in) { pr_debug("Can't add buf len %i - avail = %i\n", out + in, vq->num_free); @@ -197,7 +127,6 @@ static int vring_add_buf(struct virtqueue *_vq, /* Update free pointer */ vq->free_head = i; -add_head: /* Set token. 
*/ vq->data[head] = data; @@ -241,11 +170,6 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head) /* Put back on free list: find end */ i = head; - - /* Free the indirect table */ - if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT) - kfree(phys_to_virt(vq->vring.desc[i].addr)); - while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { i = vq->vring.desc[i].next; vq->num_free++; @@ -360,8 +284,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, struct virtio_device *vdev, void *pages, void (*notify)(struct virtqueue *), - void (*callback)(struct virtqueue *), - const char *name) + void (*callback)(struct virtqueue *)) { struct vring_virtqueue *vq; unsigned int i; @@ -380,18 +303,14 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, vq->vq.callback = callback; vq->vq.vdev = vdev; vq->vq.vq_ops = &vring_vq_ops; - vq->vq.name = name; vq->notify = notify; vq->broken = false; vq->last_used_idx = 0; vq->num_added = 0; - list_add_tail(&vq->vq.list, &vdev->vqs); #ifdef DEBUG vq->in_use = false; #endif - vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); - /* No callback? Tell other side not to bother us. */ if (!callback) vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; @@ -408,7 +327,6 @@ EXPORT_SYMBOL_GPL(vring_new_virtqueue); void vring_del_virtqueue(struct virtqueue *vq) { - list_del(&vq->list); kfree(to_vvq(vq)); } EXPORT_SYMBOL_GPL(vring_del_virtqueue); @@ -420,8 +338,6 @@ void vring_transport_features(struct virtio_device *vdev) for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { switch (i) { - case VIRTIO_RING_F_INDIRECT_DESC: - break; default: /* We don't understand this bit. */ clear_bit(i, vdev->features); diff --git a/trunk/fs/Kconfig b/trunk/fs/Kconfig index 525da2e8f73b..9f7270f36b2a 100644 --- a/trunk/fs/Kconfig +++ b/trunk/fs/Kconfig @@ -62,16 +62,6 @@ source "fs/autofs/Kconfig" source "fs/autofs4/Kconfig" source "fs/fuse/Kconfig" -config CUSE - tristate "Character device in Userspace support" - depends on FUSE_FS - help - This FUSE extension allows character devices to be - implemented in userspace. - - If you want to develop or use a userspace character device - based on CUSE, answer Y or M.
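For readers tracing the virtio_ring.c hunks above: with the indirect-descriptor path removed, vring_add_buf() is left linking one ring descriptor per scatterlist entry. The sketch below is illustrative only and not part of this patch: demo_chain_sg() is an invented helper, the descriptors are assumed to be pre-linked into a free list via their next fields (as vring_init() leaves them), and the locking and num_free bookkeeping of the real function are omitted. Out-buffers come first and are device-readable; in-buffers follow and additionally carry VRING_DESC_F_WRITE; the final descriptor drops VRING_DESC_F_NEXT to terminate the chain.

    #include <linux/scatterlist.h>
    #include <linux/virtio_ring.h>

    /* Illustrative sketch of the direct descriptor chaining kept in
     * vring_add_buf(); returns the index of the next free descriptor. */
    static unsigned int demo_chain_sg(struct vring_desc desc[],
                                      unsigned int head,
                                      struct scatterlist sg[],
                                      unsigned int out, unsigned int in)
    {
            unsigned int i, prev = head;

            for (i = head; out + in; prev = i, i = desc[i].next, sg++) {
                    /* out-buffers are read by the device, in-buffers written */
                    desc[i].flags = out ? VRING_DESC_F_NEXT
                                        : VRING_DESC_F_NEXT | VRING_DESC_F_WRITE;
                    desc[i].addr = sg_phys(sg);
                    desc[i].len = sg->length;
                    if (out)
                            out--;
                    else
                            in--;
            }
            desc[prev].flags &= ~VRING_DESC_F_NEXT; /* last one ends the chain */
            return i;                               /* caller's new free head */
    }

In the real function the descriptors live in vq->vring.desc and vq->num_free is decremented once per entry, as the surrounding hunk shows.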
- config GENERIC_ACL bool select FS_POSIX_ACL diff --git a/trunk/fs/eventfd.c b/trunk/fs/eventfd.c index 3f0e1974abdc..2a701d593d35 100644 --- a/trunk/fs/eventfd.c +++ b/trunk/fs/eventfd.c @@ -16,7 +16,6 @@ #include #include #include -#include struct eventfd_ctx { wait_queue_head_t wqh; @@ -57,7 +56,6 @@ int eventfd_signal(struct file *file, int n) return n; } -EXPORT_SYMBOL_GPL(eventfd_signal); static int eventfd_release(struct inode *inode, struct file *file) { @@ -199,7 +197,6 @@ struct file *eventfd_fget(int fd) return file; } -EXPORT_SYMBOL_GPL(eventfd_fget); SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags) { diff --git a/trunk/fs/exofs/common.h b/trunk/fs/exofs/common.h index 24667eedc023..b1512c4bb8c7 100644 --- a/trunk/fs/exofs/common.h +++ b/trunk/fs/exofs/common.h @@ -175,4 +175,10 @@ int exofs_async_op(struct osd_request *or, int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr); +int osd_req_read_kern(struct osd_request *or, + const struct osd_obj_id *obj, u64 offset, void *buff, u64 len); + +int osd_req_write_kern(struct osd_request *or, + const struct osd_obj_id *obj, u64 offset, void *buff, u64 len); + #endif /*ifndef __EXOFS_COM_H__*/ diff --git a/trunk/fs/exofs/inode.c b/trunk/fs/exofs/inode.c index 77d0a295eb1c..ba8d9fab4693 100644 --- a/trunk/fs/exofs/inode.c +++ b/trunk/fs/exofs/inode.c @@ -59,9 +59,10 @@ static void _pcol_init(struct page_collect *pcol, unsigned expected_pages, struct inode *inode) { struct exofs_sb_info *sbi = inode->i_sb->s_fs_info; + struct request_queue *req_q = sbi->s_dev->scsi_device->request_queue; pcol->sbi = sbi; - pcol->req_q = osd_request_queue(sbi->s_dev); + pcol->req_q = req_q; pcol->inode = inode; pcol->expected_pages = expected_pages; @@ -265,7 +266,7 @@ static int read_exec(struct page_collect *pcol, bool is_sync) goto err; } - osd_req_read(or, &obj, i_start, pcol->bio, pcol->length); + osd_req_read(or, &obj, pcol->bio, i_start); if (is_sync) { exofs_sync_op(or, pcol->sbi->s_timeout, oi->i_cred); @@ -521,8 +522,7 @@ static int write_exec(struct page_collect *pcol) *pcol_copy = *pcol; - pcol_copy->bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */ - osd_req_write(or, &obj, i_start, pcol_copy->bio, pcol_copy->length); + osd_req_write(or, &obj, pcol_copy->bio, i_start); ret = exofs_async_op(or, writepages_done, pcol_copy, oi->i_cred); if (unlikely(ret)) { EXOFS_ERR("write_exec: exofs_async_op() Failed\n"); diff --git a/trunk/fs/exofs/osd.c b/trunk/fs/exofs/osd.c index b3d2ccb87aaa..06ca92672eb5 100644 --- a/trunk/fs/exofs/osd.c +++ b/trunk/fs/exofs/osd.c @@ -125,3 +125,29 @@ int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr) return -EIO; } + +int osd_req_read_kern(struct osd_request *or, + const struct osd_obj_id *obj, u64 offset, void* buff, u64 len) +{ + struct request_queue *req_q = or->osd_dev->scsi_device->request_queue; + struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL); + + if (!bio) + return -ENOMEM; + + osd_req_read(or, obj, bio, offset); + return 0; +} + +int osd_req_write_kern(struct osd_request *or, + const struct osd_obj_id *obj, u64 offset, void* buff, u64 len) +{ + struct request_queue *req_q = or->osd_dev->scsi_device->request_queue; + struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL); + + if (!bio) + return -ENOMEM; + + osd_req_write(or, obj, bio, offset); + return 0; +} diff --git a/trunk/fs/fuse/Makefile b/trunk/fs/fuse/Makefile index e95eeb445e58..72437065f6ad 100644 --- a/trunk/fs/fuse/Makefile +++ b/trunk/fs/fuse/Makefile @@ -3,6 +3,5 @@ #
obj-$(CONFIG_FUSE_FS) += fuse.o -obj-$(CONFIG_CUSE) += cuse.o fuse-objs := dev.o dir.o file.o inode.o control.o diff --git a/trunk/fs/fuse/cuse.c b/trunk/fs/fuse/cuse.c deleted file mode 100644 index de792dcf3274..000000000000 --- a/trunk/fs/fuse/cuse.c +++ /dev/null @@ -1,610 +0,0 @@ -/* - * CUSE: Character device in Userspace - * - * Copyright (C) 2008-2009 SUSE Linux Products GmbH - * Copyright (C) 2008-2009 Tejun Heo - * - * This file is released under the GPLv2. - * - * CUSE enables character devices to be implemented from userland much - * like FUSE allows filesystems. On initialization /dev/cuse is - * created. By opening the file and replying to the CUSE_INIT request - * userland CUSE server can create a character device. After that the - * operation is very similar to FUSE. - * - * A CUSE instance involves the following objects. - * - * cuse_conn : contains fuse_conn and serves as bonding structure - * channel : file handle connected to the userland CUSE server - * cdev : the implemented character device - * dev : generic device for cdev - * - * Note that 'channel' is what 'dev' is in FUSE. As CUSE deals with - * devices, it's called 'channel' to reduce confusion. - * - * channel determines when the character device dies. When channel is - * closed, everything begins to destruct. The cuse_conn is taken off - * the lookup table preventing further access from cdev, cdev and - * generic device are removed and the base reference of cuse_conn is - * put. - * - * On each open, the matching cuse_conn is looked up and if found an - * additional reference is taken which is released when the file is - * closed. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "fuse_i.h" - -#define CUSE_CONNTBL_LEN 64 - -struct cuse_conn { - struct list_head list; /* linked on cuse_conntbl */ - struct fuse_conn fc; /* fuse connection */ - struct cdev *cdev; /* associated character device */ - struct device *dev; /* device representing @cdev */ - - /* init parameters, set once during initialization */ - bool unrestricted_ioctl; -}; - -static DEFINE_SPINLOCK(cuse_lock); /* protects cuse_conntbl */ -static struct list_head cuse_conntbl[CUSE_CONNTBL_LEN]; -static struct class *cuse_class; - -static struct cuse_conn *fc_to_cc(struct fuse_conn *fc) -{ - return container_of(fc, struct cuse_conn, fc); -} - -static struct list_head *cuse_conntbl_head(dev_t devt) -{ - return &cuse_conntbl[(MAJOR(devt) + MINOR(devt)) % CUSE_CONNTBL_LEN]; -} - - -/************************************************************************** - * CUSE frontend operations - * - * These are file operations for the character device. - * - * On open, CUSE opens a file from the FUSE mnt and stores it to - * private_data of the open file. All other ops call FUSE ops on the - * FUSE file. - */ - -static ssize_t cuse_read(struct file *file, char __user *buf, size_t count, - loff_t *ppos) -{ - loff_t pos = 0; - - return fuse_direct_io(file, buf, count, &pos, 0); -} - -static ssize_t cuse_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) -{ - loff_t pos = 0; - /* - * No locking or generic_write_checks(), the server is - * responsible for locking and sanity checks. 
- */ - return fuse_direct_io(file, buf, count, &pos, 1); -} - -static int cuse_open(struct inode *inode, struct file *file) -{ - dev_t devt = inode->i_cdev->dev; - struct cuse_conn *cc = NULL, *pos; - int rc; - - /* look up and get the connection */ - spin_lock(&cuse_lock); - list_for_each_entry(pos, cuse_conntbl_head(devt), list) - if (pos->dev->devt == devt) { - fuse_conn_get(&pos->fc); - cc = pos; - break; - } - spin_unlock(&cuse_lock); - - /* dead? */ - if (!cc) - return -ENODEV; - - /* - * Generic permission check is already done against the chrdev - * file, proceed to open. - */ - rc = fuse_do_open(&cc->fc, 0, file, 0); - if (rc) - fuse_conn_put(&cc->fc); - return rc; -} - -static int cuse_release(struct inode *inode, struct file *file) -{ - struct fuse_file *ff = file->private_data; - struct fuse_conn *fc = ff->fc; - - fuse_sync_release(ff, file->f_flags); - fuse_conn_put(fc); - - return 0; -} - -static long cuse_file_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct fuse_file *ff = file->private_data; - struct cuse_conn *cc = fc_to_cc(ff->fc); - unsigned int flags = 0; - - if (cc->unrestricted_ioctl) - flags |= FUSE_IOCTL_UNRESTRICTED; - - return fuse_do_ioctl(file, cmd, arg, flags); -} - -static long cuse_file_compat_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct fuse_file *ff = file->private_data; - struct cuse_conn *cc = fc_to_cc(ff->fc); - unsigned int flags = FUSE_IOCTL_COMPAT; - - if (cc->unrestricted_ioctl) - flags |= FUSE_IOCTL_UNRESTRICTED; - - return fuse_do_ioctl(file, cmd, arg, flags); -} - -static const struct file_operations cuse_frontend_fops = { - .owner = THIS_MODULE, - .read = cuse_read, - .write = cuse_write, - .open = cuse_open, - .release = cuse_release, - .unlocked_ioctl = cuse_file_ioctl, - .compat_ioctl = cuse_file_compat_ioctl, - .poll = fuse_file_poll, -}; - - -/************************************************************************** - * CUSE channel initialization and destruction - */ - -struct cuse_devinfo { - const char *name; -}; - -/** - * cuse_parse_one - parse one key=value pair - * @pp: i/o parameter for the current position - * @end: points to one past the end of the packed string - * @keyp: out parameter for key - * @valp: out parameter for value - * - * *@pp points to packed strings - "key0=val0\0key1=val1\0" which ends - * at @end - 1. This function parses one pair and set *@keyp to the - * start of the key and *@valp to the start of the value. Note that - * the original string is modified such that the key string is - * terminated with '\0'. *@pp is updated to point to the next string. - * - * RETURNS: - * 1 on successful parse, 0 on EOF, -errno on failure. 
- */ -static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp) -{ - char *p = *pp; - char *key, *val; - - while (p < end && *p == '\0') - p++; - if (p == end) - return 0; - - if (end[-1] != '\0') { - printk(KERN_ERR "CUSE: info not properly terminated\n"); - return -EINVAL; - } - - key = val = p; - p += strlen(p); - - if (valp) { - strsep(&val, "="); - if (!val) - val = key + strlen(key); - key = strstrip(key); - val = strstrip(val); - } else - key = strstrip(key); - - if (!strlen(key)) { - printk(KERN_ERR "CUSE: zero length info key specified\n"); - return -EINVAL; - } - - *pp = p; - *keyp = key; - if (valp) - *valp = val; - - return 1; -} - -/** - * cuse_parse_dev_info - parse device info - * @p: device info string - * @len: length of device info string - * @devinfo: out parameter for parsed device info - * - * Parse @p to extract device info and store it into @devinfo. String - * pointed to by @p is modified by parsing and @devinfo points into - * them, so @p shouldn't be freed while @devinfo is in use. - * - * RETURNS: - * 0 on success, -errno on failure. - */ -static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo) -{ - char *end = p + len; - char *key, *val; - int rc; - - while (true) { - rc = cuse_parse_one(&p, end, &key, &val); - if (rc < 0) - return rc; - if (!rc) - break; - if (strcmp(key, "DEVNAME") == 0) - devinfo->name = val; - else - printk(KERN_WARNING "CUSE: unknown device info \"%s\"\n", - key); - } - - if (!devinfo->name || !strlen(devinfo->name)) { - printk(KERN_ERR "CUSE: DEVNAME unspecified\n"); - return -EINVAL; - } - - return 0; -} - -static void cuse_gendev_release(struct device *dev) -{ - kfree(dev); -} - -/** - * cuse_process_init_reply - finish initializing CUSE channel - * - * This function creates the character device and sets up all the - * required data structures for it. Please read the comment at the - * top of this file for high level overview. 
- */ -static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) -{ - struct cuse_conn *cc = fc_to_cc(fc); - struct cuse_init_out *arg = &req->misc.cuse_init_out; - struct page *page = req->pages[0]; - struct cuse_devinfo devinfo = { }; - struct device *dev; - struct cdev *cdev; - dev_t devt; - int rc; - - if (req->out.h.error || - arg->major != FUSE_KERNEL_VERSION || arg->minor < 11) { - goto err; - } - - fc->minor = arg->minor; - fc->max_read = max_t(unsigned, arg->max_read, 4096); - fc->max_write = max_t(unsigned, arg->max_write, 4096); - - /* parse init reply */ - cc->unrestricted_ioctl = arg->flags & CUSE_UNRESTRICTED_IOCTL; - - rc = cuse_parse_devinfo(page_address(page), req->out.args[1].size, - &devinfo); - if (rc) - goto err; - - /* determine and reserve devt */ - devt = MKDEV(arg->dev_major, arg->dev_minor); - if (!MAJOR(devt)) - rc = alloc_chrdev_region(&devt, MINOR(devt), 1, devinfo.name); - else - rc = register_chrdev_region(devt, 1, devinfo.name); - if (rc) { - printk(KERN_ERR "CUSE: failed to register chrdev region\n"); - goto err; - } - - /* devt determined, create device */ - rc = -ENOMEM; - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - goto err_region; - - device_initialize(dev); - dev_set_uevent_suppress(dev, 1); - dev->class = cuse_class; - dev->devt = devt; - dev->release = cuse_gendev_release; - dev_set_drvdata(dev, cc); - dev_set_name(dev, "%s", devinfo.name); - - rc = device_add(dev); - if (rc) - goto err_device; - - /* register cdev */ - rc = -ENOMEM; - cdev = cdev_alloc(); - if (!cdev) - goto err_device; - - cdev->owner = THIS_MODULE; - cdev->ops = &cuse_frontend_fops; - - rc = cdev_add(cdev, devt, 1); - if (rc) - goto err_cdev; - - cc->dev = dev; - cc->cdev = cdev; - - /* make the device available */ - spin_lock(&cuse_lock); - list_add(&cc->list, cuse_conntbl_head(devt)); - spin_unlock(&cuse_lock); - - /* announce device availability */ - dev_set_uevent_suppress(dev, 0); - kobject_uevent(&dev->kobj, KOBJ_ADD); -out: - __free_page(page); - return; - -err_cdev: - cdev_del(cdev); -err_device: - put_device(dev); -err_region: - unregister_chrdev_region(devt, 1); -err: - fc->conn_error = 1; - goto out; -} - -static int cuse_send_init(struct cuse_conn *cc) -{ - int rc; - struct fuse_req *req; - struct page *page; - struct fuse_conn *fc = &cc->fc; - struct cuse_init_in *arg; - - BUILD_BUG_ON(CUSE_INIT_INFO_MAX > PAGE_SIZE); - - req = fuse_get_req(fc); - if (IS_ERR(req)) { - rc = PTR_ERR(req); - goto err; - } - - rc = -ENOMEM; - page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) - goto err_put_req; - - arg = &req->misc.cuse_init_in; - arg->major = FUSE_KERNEL_VERSION; - arg->minor = FUSE_KERNEL_MINOR_VERSION; - arg->flags |= CUSE_UNRESTRICTED_IOCTL; - req->in.h.opcode = CUSE_INIT; - req->in.numargs = 1; - req->in.args[0].size = sizeof(struct cuse_init_in); - req->in.args[0].value = arg; - req->out.numargs = 2; - req->out.args[0].size = sizeof(struct cuse_init_out); - req->out.args[0].value = &req->misc.cuse_init_out; - req->out.args[1].size = CUSE_INIT_INFO_MAX; - req->out.argvar = 1; - req->out.argpages = 1; - req->pages[0] = page; - req->num_pages = 1; - req->end = cuse_process_init_reply; - fuse_request_send_background(fc, req); - - return 0; - -err_put_req: - fuse_put_request(fc, req); -err: - return rc; -} - -static void cuse_fc_release(struct fuse_conn *fc) -{ - struct cuse_conn *cc = fc_to_cc(fc); - kfree(cc); -} - -/** - * cuse_channel_open - open method for /dev/cuse - * @inode: inode for /dev/cuse - * @file: file struct being 
opened - * - * Userland CUSE server can create a CUSE device by opening /dev/cuse - * and replying to the initialization request the kernel sends. This - * function is responsible for handling CUSE device initialization. - * Because the fd opened by this function is used during - * initialization, this function only creates cuse_conn and sends - * init. The rest is delegated to a kthread. - * - * RETURNS: - * 0 on success, -errno on failure. - */ -static int cuse_channel_open(struct inode *inode, struct file *file) -{ - struct cuse_conn *cc; - int rc; - - /* set up cuse_conn */ - cc = kzalloc(sizeof(*cc), GFP_KERNEL); - if (!cc) - return -ENOMEM; - - fuse_conn_init(&cc->fc); - - INIT_LIST_HEAD(&cc->list); - cc->fc.release = cuse_fc_release; - - cc->fc.connected = 1; - cc->fc.blocked = 0; - rc = cuse_send_init(cc); - if (rc) { - fuse_conn_put(&cc->fc); - return rc; - } - file->private_data = &cc->fc; /* channel owns base reference to cc */ - - return 0; -} - -/** - * cuse_channel_release - release method for /dev/cuse - * @inode: inode for /dev/cuse - * @file: file struct being closed - * - * Disconnect the channel, deregister CUSE device and initiate - * destruction by putting the default reference. - * - * RETURNS: - * 0 on success, -errno on failure. - */ -static int cuse_channel_release(struct inode *inode, struct file *file) -{ - struct cuse_conn *cc = fc_to_cc(file->private_data); - int rc; - - /* remove from the conntbl, no more access from this point on */ - spin_lock(&cuse_lock); - list_del_init(&cc->list); - spin_unlock(&cuse_lock); - - /* remove device */ - if (cc->dev) - device_unregister(cc->dev); - if (cc->cdev) { - unregister_chrdev_region(cc->cdev->dev, 1); - cdev_del(cc->cdev); - } - - /* kill connection and shutdown channel */ - fuse_conn_kill(&cc->fc); - rc = fuse_dev_release(inode, file); /* puts the base reference */ - - return rc; -} - -static struct file_operations cuse_channel_fops; /* initialized during init */ - - -/************************************************************************** - * Misc stuff and module initialization - * - * CUSE exports the same set of attributes to sysfs as fusectl.
- */ - -static ssize_t cuse_class_waiting_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct cuse_conn *cc = dev_get_drvdata(dev); - - return sprintf(buf, "%d\n", atomic_read(&cc->fc.num_waiting)); -} - -static ssize_t cuse_class_abort_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct cuse_conn *cc = dev_get_drvdata(dev); - - fuse_abort_conn(&cc->fc); - return count; -} - -static struct device_attribute cuse_class_dev_attrs[] = { - __ATTR(waiting, S_IFREG | 0400, cuse_class_waiting_show, NULL), - __ATTR(abort, S_IFREG | 0200, NULL, cuse_class_abort_store), - { } -}; - -static struct miscdevice cuse_miscdev = { - .minor = MISC_DYNAMIC_MINOR, - .name = "cuse", - .fops = &cuse_channel_fops, -}; - -static int __init cuse_init(void) -{ - int i, rc; - - /* init conntbl */ - for (i = 0; i < CUSE_CONNTBL_LEN; i++) - INIT_LIST_HEAD(&cuse_conntbl[i]); - - /* inherit and extend fuse_dev_operations */ - cuse_channel_fops = fuse_dev_operations; - cuse_channel_fops.owner = THIS_MODULE; - cuse_channel_fops.open = cuse_channel_open; - cuse_channel_fops.release = cuse_channel_release; - - cuse_class = class_create(THIS_MODULE, "cuse"); - if (IS_ERR(cuse_class)) - return PTR_ERR(cuse_class); - - cuse_class->dev_attrs = cuse_class_dev_attrs; - - rc = misc_register(&cuse_miscdev); - if (rc) { - class_destroy(cuse_class); - return rc; - } - - return 0; -} - -static void __exit cuse_exit(void) -{ - misc_deregister(&cuse_miscdev); - class_destroy(cuse_class); -} - -module_init(cuse_init); -module_exit(cuse_exit); - -MODULE_AUTHOR("Tejun Heo "); -MODULE_DESCRIPTION("Character device in Userspace"); -MODULE_LICENSE("GPL"); diff --git a/trunk/fs/fuse/dev.c b/trunk/fs/fuse/dev.c index 8fed2ed12f38..ba76b68c52ff 100644 --- a/trunk/fs/fuse/dev.c +++ b/trunk/fs/fuse/dev.c @@ -46,7 +46,6 @@ struct fuse_req *fuse_request_alloc(void) fuse_request_init(req); return req; } -EXPORT_SYMBOL_GPL(fuse_request_alloc); struct fuse_req *fuse_request_alloc_nofs(void) { @@ -125,7 +124,6 @@ struct fuse_req *fuse_get_req(struct fuse_conn *fc) atomic_dec(&fc->num_waiting); return ERR_PTR(err); } -EXPORT_SYMBOL_GPL(fuse_get_req); /* * Return request in fuse_file->reserved_req. 
However that may @@ -210,7 +208,6 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) fuse_request_free(req); } } -EXPORT_SYMBOL_GPL(fuse_put_request); static unsigned len_args(unsigned numargs, struct fuse_arg *args) { @@ -285,7 +282,7 @@ __releases(&fc->lock) wake_up_all(&fc->blocked_waitq); } if (fc->num_background == FUSE_CONGESTION_THRESHOLD && - fc->connected && fc->bdi_initialized) { + fc->connected) { clear_bdi_congested(&fc->bdi, READ); clear_bdi_congested(&fc->bdi, WRITE); } @@ -403,7 +400,6 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) } spin_unlock(&fc->lock); } -EXPORT_SYMBOL_GPL(fuse_request_send); static void fuse_request_send_nowait_locked(struct fuse_conn *fc, struct fuse_req *req) @@ -412,8 +408,7 @@ static void fuse_request_send_nowait_locked(struct fuse_conn *fc, fc->num_background++; if (fc->num_background == FUSE_MAX_BACKGROUND) fc->blocked = 1; - if (fc->num_background == FUSE_CONGESTION_THRESHOLD && - fc->bdi_initialized) { + if (fc->num_background == FUSE_CONGESTION_THRESHOLD) { set_bdi_congested(&fc->bdi, READ); set_bdi_congested(&fc->bdi, WRITE); } @@ -444,7 +439,6 @@ void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req) req->isreply = 1; fuse_request_send_nowait(fc, req); } -EXPORT_SYMBOL_GPL(fuse_request_send_background); /* * Called under fc->lock @@ -1111,9 +1105,8 @@ void fuse_abort_conn(struct fuse_conn *fc) } spin_unlock(&fc->lock); } -EXPORT_SYMBOL_GPL(fuse_abort_conn); -int fuse_dev_release(struct inode *inode, struct file *file) +static int fuse_dev_release(struct inode *inode, struct file *file) { struct fuse_conn *fc = fuse_get_conn(file); if (fc) { @@ -1127,7 +1120,6 @@ int fuse_dev_release(struct inode *inode, struct file *file) return 0; } -EXPORT_SYMBOL_GPL(fuse_dev_release); static int fuse_dev_fasync(int fd, struct file *file, int on) { @@ -1150,7 +1142,6 @@ const struct file_operations fuse_dev_operations = { .release = fuse_dev_release, .fasync = fuse_dev_fasync, }; -EXPORT_SYMBOL_GPL(fuse_dev_operations); static struct miscdevice fuse_miscdevice = { .minor = FUSE_MINOR, diff --git a/trunk/fs/fuse/dir.c b/trunk/fs/fuse/dir.c index b3089a083d30..8b8eebc5614b 100644 --- a/trunk/fs/fuse/dir.c +++ b/trunk/fs/fuse/dir.c @@ -361,6 +361,19 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, return ERR_PTR(err); } +/* + * Synchronous release for the case when something goes wrong in CREATE_OPEN + */ +static void fuse_sync_release(struct fuse_conn *fc, struct fuse_file *ff, + u64 nodeid, int flags) +{ + fuse_release_fill(ff, nodeid, flags, FUSE_RELEASE); + ff->reserved_req->force = 1; + fuse_request_send(fc, ff->reserved_req); + fuse_put_request(fc, ff->reserved_req); + kfree(ff); +} + /* * Atomic create+open operation * @@ -432,14 +445,12 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, goto out_free_ff; fuse_put_request(fc, req); - ff->fh = outopen.fh; - ff->nodeid = outentry.nodeid; - ff->open_flags = outopen.open_flags; inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation, &outentry.attr, entry_attr_timeout(&outentry), 0); if (!inode) { flags &= ~(O_CREAT | O_EXCL | O_TRUNC); - fuse_sync_release(ff, flags); + ff->fh = outopen.fh; + fuse_sync_release(fc, ff, outentry.nodeid, flags); fuse_send_forget(fc, forget_req, outentry.nodeid, 1); return -ENOMEM; } @@ -449,11 +460,11 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, fuse_invalidate_attr(dir); file = lookup_instantiate_filp(nd, 
entry, generic_file_open); if (IS_ERR(file)) { - fuse_sync_release(ff, flags); + ff->fh = outopen.fh; + fuse_sync_release(fc, ff, outentry.nodeid, flags); return PTR_ERR(file); } - file->private_data = fuse_file_get(ff); - fuse_finish_open(inode, file); + fuse_finish_open(inode, file, ff, &outopen); return 0; out_free_ff: @@ -1024,7 +1035,7 @@ static int fuse_readdir(struct file *file, void *dstbuf, filldir_t filldir) req->out.argpages = 1; req->num_pages = 1; req->pages[0] = page; - fuse_read_fill(req, file, file->f_pos, PAGE_SIZE, FUSE_READDIR); + fuse_read_fill(req, file, inode, file->f_pos, PAGE_SIZE, FUSE_READDIR); fuse_request_send(fc, req); nbytes = req->out.args[0].size; err = req->out.h.error; @@ -1090,14 +1101,12 @@ static void fuse_put_link(struct dentry *dentry, struct nameidata *nd, void *c) static int fuse_dir_open(struct inode *inode, struct file *file) { - return fuse_open_common(inode, file, true); + return fuse_open_common(inode, file, 1); } static int fuse_dir_release(struct inode *inode, struct file *file) { - fuse_release_common(file, FUSE_RELEASEDIR); - - return 0; + return fuse_release_common(inode, file, 1); } static int fuse_dir_fsync(struct file *file, struct dentry *de, int datasync) diff --git a/trunk/fs/fuse/file.c b/trunk/fs/fuse/file.c index fce6ce694fde..06f30e965676 100644 --- a/trunk/fs/fuse/file.c +++ b/trunk/fs/fuse/file.c @@ -12,13 +12,13 @@ #include #include #include -#include static const struct file_operations fuse_direct_io_file_operations; -static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file, - int opcode, struct fuse_open_out *outargp) +static int fuse_send_open(struct inode *inode, struct file *file, int isdir, + struct fuse_open_out *outargp) { + struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_open_in inarg; struct fuse_req *req; int err; @@ -31,8 +31,8 @@ static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file, inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY); if (!fc->atomic_o_trunc) inarg.flags &= ~O_TRUNC; - req->in.h.opcode = opcode; - req->in.h.nodeid = nodeid; + req->in.h.opcode = isdir ? 
FUSE_OPENDIR : FUSE_OPEN; + req->in.h.nodeid = get_node_id(inode); req->in.numargs = 1; req->in.args[0].size = sizeof(inarg); req->in.args[0].value = &inarg; @@ -49,27 +49,22 @@ static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file, struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) { struct fuse_file *ff; - ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL); - if (unlikely(!ff)) - return NULL; - - ff->fc = fc; - ff->reserved_req = fuse_request_alloc(); - if (unlikely(!ff->reserved_req)) { - kfree(ff); - return NULL; + if (ff) { + ff->reserved_req = fuse_request_alloc(); + if (!ff->reserved_req) { + kfree(ff); + return NULL; + } else { + INIT_LIST_HEAD(&ff->write_entry); + atomic_set(&ff->count, 0); + spin_lock(&fc->lock); + ff->kh = ++fc->khctr; + spin_unlock(&fc->lock); + } + RB_CLEAR_NODE(&ff->polled_node); + init_waitqueue_head(&ff->poll_wait); } - - INIT_LIST_HEAD(&ff->write_entry); - atomic_set(&ff->count, 0); - RB_CLEAR_NODE(&ff->polled_node); - init_waitqueue_head(&ff->poll_wait); - - spin_lock(&fc->lock); - ff->kh = ++fc->khctr; - spin_unlock(&fc->lock); - return ff; } @@ -79,7 +74,7 @@ void fuse_file_free(struct fuse_file *ff) kfree(ff); } -struct fuse_file *fuse_file_get(struct fuse_file *ff) +static struct fuse_file *fuse_file_get(struct fuse_file *ff) { atomic_inc(&ff->count); return ff; @@ -87,65 +82,40 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff) static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) { - path_put(&req->misc.release.path); + dput(req->misc.release.dentry); + mntput(req->misc.release.vfsmount); } static void fuse_file_put(struct fuse_file *ff) { if (atomic_dec_and_test(&ff->count)) { struct fuse_req *req = ff->reserved_req; - + struct inode *inode = req->misc.release.dentry->d_inode; + struct fuse_conn *fc = get_fuse_conn(inode); req->end = fuse_release_end; - fuse_request_send_background(ff->fc, req); + fuse_request_send_background(fc, req); kfree(ff); } } -int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file, - bool isdir) +void fuse_finish_open(struct inode *inode, struct file *file, + struct fuse_file *ff, struct fuse_open_out *outarg) { - struct fuse_open_out outarg; - struct fuse_file *ff; - int err; - int opcode = isdir ? 
FUSE_OPENDIR : FUSE_OPEN; - - ff = fuse_file_alloc(fc); - if (!ff) - return -ENOMEM; - - err = fuse_send_open(fc, nodeid, file, opcode, &outarg); - if (err) { - fuse_file_free(ff); - return err; - } - - if (isdir) - outarg.open_flags &= ~FOPEN_DIRECT_IO; - - ff->fh = outarg.fh; - ff->nodeid = nodeid; - ff->open_flags = outarg.open_flags; - file->private_data = fuse_file_get(ff); - - return 0; -} -EXPORT_SYMBOL_GPL(fuse_do_open); - -void fuse_finish_open(struct inode *inode, struct file *file) -{ - struct fuse_file *ff = file->private_data; - - if (ff->open_flags & FOPEN_DIRECT_IO) + if (outarg->open_flags & FOPEN_DIRECT_IO) file->f_op = &fuse_direct_io_file_operations; - if (!(ff->open_flags & FOPEN_KEEP_CACHE)) + if (!(outarg->open_flags & FOPEN_KEEP_CACHE)) invalidate_inode_pages2(inode->i_mapping); - if (ff->open_flags & FOPEN_NONSEEKABLE) + if (outarg->open_flags & FOPEN_NONSEEKABLE) nonseekable_open(inode, file); + ff->fh = outarg->fh; + file->private_data = fuse_file_get(ff); } -int fuse_open_common(struct inode *inode, struct file *file, bool isdir) +int fuse_open_common(struct inode *inode, struct file *file, int isdir) { struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_open_out outarg; + struct fuse_file *ff; int err; /* VFS checks this, but only _after_ ->open() */ @@ -156,85 +126,78 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) if (err) return err; - err = fuse_do_open(fc, get_node_id(inode), file, isdir); - if (err) - return err; + ff = fuse_file_alloc(fc); + if (!ff) + return -ENOMEM; - fuse_finish_open(inode, file); + err = fuse_send_open(inode, file, isdir, &outarg); + if (err) + fuse_file_free(ff); + else { + if (isdir) + outarg.open_flags &= ~FOPEN_DIRECT_IO; + fuse_finish_open(inode, file, ff, &outarg); + } - return 0; + return err; } -static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode) +void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode) { - struct fuse_conn *fc = ff->fc; struct fuse_req *req = ff->reserved_req; struct fuse_release_in *inarg = &req->misc.release.in; - spin_lock(&fc->lock); - list_del(&ff->write_entry); - if (!RB_EMPTY_NODE(&ff->polled_node)) - rb_erase(&ff->polled_node, &fc->polled_files); - spin_unlock(&fc->lock); - - wake_up_interruptible_sync(&ff->poll_wait); - inarg->fh = ff->fh; inarg->flags = flags; req->in.h.opcode = opcode; - req->in.h.nodeid = ff->nodeid; + req->in.h.nodeid = nodeid; req->in.numargs = 1; req->in.args[0].size = sizeof(struct fuse_release_in); req->in.args[0].value = inarg; } -void fuse_release_common(struct file *file, int opcode) +int fuse_release_common(struct inode *inode, struct file *file, int isdir) { - struct fuse_file *ff; - struct fuse_req *req; + struct fuse_file *ff = file->private_data; + if (ff) { + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_req *req = ff->reserved_req; - ff = file->private_data; - if (unlikely(!ff)) - return; + fuse_release_fill(ff, get_node_id(inode), file->f_flags, + isdir ? 
FUSE_RELEASEDIR : FUSE_RELEASE); - req = ff->reserved_req; - fuse_prepare_release(ff, file->f_flags, opcode); + /* Hold vfsmount and dentry until release is finished */ + req->misc.release.vfsmount = mntget(file->f_path.mnt); + req->misc.release.dentry = dget(file->f_path.dentry); + + spin_lock(&fc->lock); + list_del(&ff->write_entry); + if (!RB_EMPTY_NODE(&ff->polled_node)) + rb_erase(&ff->polled_node, &fc->polled_files); + spin_unlock(&fc->lock); - /* Hold vfsmount and dentry until release is finished */ - path_get(&file->f_path); - req->misc.release.path = file->f_path; + wake_up_interruptible_sync(&ff->poll_wait); + /* + * Normally this will send the RELEASE request, + * however if some asynchronous READ or WRITE requests + * are outstanding, the sending will be delayed + */ + fuse_file_put(ff); + } - /* - * Normally this will send the RELEASE request, however if - * some asynchronous READ or WRITE requests are outstanding, - * the sending will be delayed. - */ - fuse_file_put(ff); + /* Return value is ignored by VFS */ + return 0; } static int fuse_open(struct inode *inode, struct file *file) { - return fuse_open_common(inode, file, false); + return fuse_open_common(inode, file, 0); } static int fuse_release(struct inode *inode, struct file *file) { - fuse_release_common(file, FUSE_RELEASE); - - /* return value is ignored by VFS */ - return 0; -} - -void fuse_sync_release(struct fuse_file *ff, int flags) -{ - WARN_ON(atomic_read(&ff->count) > 1); - fuse_prepare_release(ff, flags, FUSE_RELEASE); - ff->reserved_req->force = 1; - fuse_request_send(ff->fc, ff->reserved_req); - fuse_put_request(ff->fc, ff->reserved_req); - kfree(ff); + return fuse_release_common(inode, file, 0); } -EXPORT_SYMBOL_GPL(fuse_sync_release); /* * Scramble the ID space with XTEA, so that the value of the files_struct @@ -408,8 +371,8 @@ static int fuse_fsync(struct file *file, struct dentry *de, int datasync) return fuse_fsync_common(file, de, datasync, 0); } -void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, - size_t count, int opcode) +void fuse_read_fill(struct fuse_req *req, struct file *file, + struct inode *inode, loff_t pos, size_t count, int opcode) { struct fuse_read_in *inarg = &req->misc.read.in; struct fuse_file *ff = file->private_data; @@ -419,7 +382,7 @@ void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, inarg->size = count; inarg->flags = file->f_flags; req->in.h.opcode = opcode; - req->in.h.nodeid = ff->nodeid; + req->in.h.nodeid = get_node_id(inode); req->in.numargs = 1; req->in.args[0].size = sizeof(struct fuse_read_in); req->in.args[0].value = inarg; @@ -429,12 +392,12 @@ void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, } static size_t fuse_send_read(struct fuse_req *req, struct file *file, - loff_t pos, size_t count, fl_owner_t owner) + struct inode *inode, loff_t pos, size_t count, + fl_owner_t owner) { - struct fuse_file *ff = file->private_data; - struct fuse_conn *fc = ff->fc; + struct fuse_conn *fc = get_fuse_conn(inode); - fuse_read_fill(req, file, pos, count, FUSE_READ); + fuse_read_fill(req, file, inode, pos, count, FUSE_READ); if (owner != NULL) { struct fuse_read_in *inarg = &req->misc.read.in; @@ -492,7 +455,7 @@ static int fuse_readpage(struct file *file, struct page *page) req->out.argpages = 1; req->num_pages = 1; req->pages[0] = page; - num_read = fuse_send_read(req, file, pos, count, NULL); + num_read = fuse_send_read(req, file, inode, pos, count, NULL); err = req->out.h.error; fuse_put_request(fc, req); @@ 
-541,18 +504,19 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) fuse_file_put(req->ff); } -static void fuse_send_readpages(struct fuse_req *req, struct file *file) +static void fuse_send_readpages(struct fuse_req *req, struct file *file, + struct inode *inode) { - struct fuse_file *ff = file->private_data; - struct fuse_conn *fc = ff->fc; + struct fuse_conn *fc = get_fuse_conn(inode); loff_t pos = page_offset(req->pages[0]); size_t count = req->num_pages << PAGE_CACHE_SHIFT; req->out.argpages = 1; req->out.page_zeroing = 1; - fuse_read_fill(req, file, pos, count, FUSE_READ); + fuse_read_fill(req, file, inode, pos, count, FUSE_READ); req->misc.read.attr_ver = fuse_get_attr_version(fc); if (fc->async_read) { + struct fuse_file *ff = file->private_data; req->ff = fuse_file_get(ff); req->end = fuse_readpages_end; fuse_request_send_background(fc, req); @@ -582,7 +546,7 @@ static int fuse_readpages_fill(void *_data, struct page *page) (req->num_pages == FUSE_MAX_PAGES_PER_REQ || (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || req->pages[req->num_pages - 1]->index + 1 != page->index)) { - fuse_send_readpages(req, data->file); + fuse_send_readpages(req, data->file, inode); data->req = req = fuse_get_req(fc); if (IS_ERR(req)) { unlock_page(page); @@ -616,7 +580,7 @@ static int fuse_readpages(struct file *file, struct address_space *mapping, err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); if (!err) { if (data.req->num_pages) - fuse_send_readpages(data.req, file); + fuse_send_readpages(data.req, file, inode); else fuse_put_request(fc, data.req); } @@ -643,19 +607,24 @@ static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov, return generic_file_aio_read(iocb, iov, nr_segs, pos); } -static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff, - loff_t pos, size_t count) +static void fuse_write_fill(struct fuse_req *req, struct file *file, + struct fuse_file *ff, struct inode *inode, + loff_t pos, size_t count, int writepage) { + struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_write_in *inarg = &req->misc.write.in; struct fuse_write_out *outarg = &req->misc.write.out; + memset(inarg, 0, sizeof(struct fuse_write_in)); inarg->fh = ff->fh; inarg->offset = pos; inarg->size = count; + inarg->write_flags = writepage ? FUSE_WRITE_CACHE : 0; + inarg->flags = file ? 
file->f_flags : 0; req->in.h.opcode = FUSE_WRITE; - req->in.h.nodeid = ff->nodeid; + req->in.h.nodeid = get_node_id(inode); req->in.numargs = 2; - if (ff->fc->minor < 9) + if (fc->minor < 9) req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE; else req->in.args[0].size = sizeof(struct fuse_write_in); @@ -667,15 +636,13 @@ static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff, } static size_t fuse_send_write(struct fuse_req *req, struct file *file, - loff_t pos, size_t count, fl_owner_t owner) + struct inode *inode, loff_t pos, size_t count, + fl_owner_t owner) { - struct fuse_file *ff = file->private_data; - struct fuse_conn *fc = ff->fc; - struct fuse_write_in *inarg = &req->misc.write.in; - - fuse_write_fill(req, ff, pos, count); - inarg->flags = file->f_flags; + struct fuse_conn *fc = get_fuse_conn(inode); + fuse_write_fill(req, file, file->private_data, inode, pos, count, 0); if (owner != NULL) { + struct fuse_write_in *inarg = &req->misc.write.in; inarg->write_flags |= FUSE_WRITE_LOCKOWNER; inarg->lock_owner = fuse_lock_owner_id(fc, owner); } @@ -733,7 +700,7 @@ static int fuse_buffered_write(struct file *file, struct inode *inode, req->num_pages = 1; req->pages[0] = page; req->page_offset = offset; - nres = fuse_send_write(req, file, pos, count, NULL); + nres = fuse_send_write(req, file, inode, pos, count, NULL); err = req->out.h.error; fuse_put_request(fc, req); if (!err && !nres) @@ -774,7 +741,7 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file, for (i = 0; i < req->num_pages; i++) fuse_wait_on_page_writeback(inode, req->pages[i]->index); - res = fuse_send_write(req, file, pos, count, NULL); + res = fuse_send_write(req, file, inode, pos, count, NULL); offset = req->page_offset; count = res; @@ -1012,23 +979,25 @@ static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf, return 0; } -ssize_t fuse_direct_io(struct file *file, const char __user *buf, - size_t count, loff_t *ppos, int write) +static ssize_t fuse_direct_io(struct file *file, const char __user *buf, + size_t count, loff_t *ppos, int write) { - struct fuse_file *ff = file->private_data; - struct fuse_conn *fc = ff->fc; + struct inode *inode = file->f_path.dentry->d_inode; + struct fuse_conn *fc = get_fuse_conn(inode); size_t nmax = write ? 
fc->max_write : fc->max_read; loff_t pos = *ppos; ssize_t res = 0; struct fuse_req *req; + if (is_bad_inode(inode)) + return -EIO; + req = fuse_get_req(fc); if (IS_ERR(req)) return PTR_ERR(req); while (count) { size_t nres; - fl_owner_t owner = current->files; size_t nbytes = min(count, nmax); int err = fuse_get_user_pages(req, buf, &nbytes, write); if (err) { @@ -1037,10 +1006,11 @@ ssize_t fuse_direct_io(struct file *file, const char __user *buf, } if (write) - nres = fuse_send_write(req, file, pos, nbytes, owner); + nres = fuse_send_write(req, file, inode, pos, nbytes, + current->files); else - nres = fuse_send_read(req, file, pos, nbytes, owner); - + nres = fuse_send_read(req, file, inode, pos, nbytes, + current->files); fuse_release_user_pages(req, !write); if (req->out.h.error) { if (!res) @@ -1064,27 +1034,20 @@ ssize_t fuse_direct_io(struct file *file, const char __user *buf, } } fuse_put_request(fc, req); - if (res > 0) + if (res > 0) { + if (write) + fuse_write_update_size(inode, pos); *ppos = pos; + } + fuse_invalidate_attr(inode); return res; } -EXPORT_SYMBOL_GPL(fuse_direct_io); static ssize_t fuse_direct_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - ssize_t res; - struct inode *inode = file->f_path.dentry->d_inode; - - if (is_bad_inode(inode)) - return -EIO; - - res = fuse_direct_io(file, buf, count, ppos, 0); - - fuse_invalidate_attr(inode); - - return res; + return fuse_direct_io(file, buf, count, ppos, 0); } static ssize_t fuse_direct_write(struct file *file, const char __user *buf, @@ -1092,22 +1055,12 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf, { struct inode *inode = file->f_path.dentry->d_inode; ssize_t res; - - if (is_bad_inode(inode)) - return -EIO; - /* Don't allow parallel writes to the same file */ mutex_lock(&inode->i_mutex); res = generic_write_checks(file, ppos, &count, 0); - if (!res) { + if (!res) res = fuse_direct_io(file, buf, count, ppos, 1); - if (res > 0) - fuse_write_update_size(inode, *ppos); - } mutex_unlock(&inode->i_mutex); - - fuse_invalidate_attr(inode); - return res; } @@ -1224,10 +1177,9 @@ static int fuse_writepage_locked(struct page *page) req->ff = fuse_file_get(ff); spin_unlock(&fc->lock); - fuse_write_fill(req, ff, page_offset(page), 0); + fuse_write_fill(req, NULL, ff, inode, page_offset(page), 0, 1); copy_highpage(tmp_page, page); - req->misc.write.in.write_flags |= FUSE_WRITE_CACHE; req->in.argpages = 1; req->num_pages = 1; req->pages[0] = tmp_page; @@ -1651,11 +1603,12 @@ static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov, * limits ioctl data transfers to well-formed ioctls and is the forced * behavior for all FUSE servers. 
*/ -long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, - unsigned int flags) +static long fuse_file_do_ioctl(struct file *file, unsigned int cmd, + unsigned long arg, unsigned int flags) { + struct inode *inode = file->f_dentry->d_inode; struct fuse_file *ff = file->private_data; - struct fuse_conn *fc = ff->fc; + struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_ioctl_in inarg = { .fh = ff->fh, .cmd = cmd, @@ -1674,6 +1627,13 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, /* assume all the iovs returned by client always fits in a page */ BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); + if (!fuse_allow_task(fc, current)) + return -EACCES; + + err = -EIO; + if (is_bad_inode(inode)) + goto out; + err = -ENOMEM; pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL); iov_page = alloc_page(GFP_KERNEL); @@ -1734,7 +1694,7 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, /* okay, let's send it to the client */ req->in.h.opcode = FUSE_IOCTL; - req->in.h.nodeid = ff->nodeid; + req->in.h.nodeid = get_node_id(inode); req->in.numargs = 1; req->in.args[0].size = sizeof(inarg); req->in.args[0].value = &inarg; @@ -1817,33 +1777,17 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, return err ? err : outarg.result; } -EXPORT_SYMBOL_GPL(fuse_do_ioctl); - -static long fuse_file_ioctl_common(struct file *file, unsigned int cmd, - unsigned long arg, unsigned int flags) -{ - struct inode *inode = file->f_dentry->d_inode; - struct fuse_conn *fc = get_fuse_conn(inode); - - if (!fuse_allow_task(fc, current)) - return -EACCES; - - if (is_bad_inode(inode)) - return -EIO; - - return fuse_do_ioctl(file, cmd, arg, flags); -} static long fuse_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - return fuse_file_ioctl_common(file, cmd, arg, 0); + return fuse_file_do_ioctl(file, cmd, arg, 0); } static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - return fuse_file_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT); + return fuse_file_do_ioctl(file, cmd, arg, FUSE_IOCTL_COMPAT); } /* @@ -1897,10 +1841,11 @@ static void fuse_register_polled_file(struct fuse_conn *fc, spin_unlock(&fc->lock); } -unsigned fuse_file_poll(struct file *file, poll_table *wait) +static unsigned fuse_file_poll(struct file *file, poll_table *wait) { + struct inode *inode = file->f_dentry->d_inode; struct fuse_file *ff = file->private_data; - struct fuse_conn *fc = ff->fc; + struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh }; struct fuse_poll_out outarg; struct fuse_req *req; @@ -1925,7 +1870,7 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait) return PTR_ERR(req); req->in.h.opcode = FUSE_POLL; - req->in.h.nodeid = ff->nodeid; + req->in.h.nodeid = get_node_id(inode); req->in.numargs = 1; req->in.args[0].size = sizeof(inarg); req->in.args[0].value = &inarg; @@ -1944,7 +1889,6 @@ unsigned fuse_file_poll(struct file *file, poll_table *wait) } return POLLERR; } -EXPORT_SYMBOL_GPL(fuse_file_poll); /* * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and diff --git a/trunk/fs/fuse/fuse_i.h b/trunk/fs/fuse/fuse_i.h index aaf2f9ff970e..6fc5aedaa0d5 100644 --- a/trunk/fs/fuse/fuse_i.h +++ b/trunk/fs/fuse/fuse_i.h @@ -97,13 +97,8 @@ struct fuse_inode { struct list_head writepages; }; -struct fuse_conn; - /** FUSE specific file data */ struct fuse_file { - /** Fuse 
connection for this file */ - struct fuse_conn *fc; - /** Request reserved for flush and release */ struct fuse_req *reserved_req; @@ -113,15 +108,9 @@ struct fuse_file { /** File handle used by userspace */ u64 fh; - /** Node id of this file */ - u64 nodeid; - /** Refcount */ atomic_t count; - /** FOPEN_* flags returned by open */ - u32 open_flags; - /** Entry on inode's write_files list */ struct list_head write_entry; @@ -196,6 +185,8 @@ enum fuse_req_state { FUSE_REQ_FINISHED }; +struct fuse_conn; + /** * A request to the client */ @@ -257,12 +248,11 @@ struct fuse_req { struct fuse_forget_in forget_in; struct { struct fuse_release_in in; - struct path path; + struct vfsmount *vfsmount; + struct dentry *dentry; } release; struct fuse_init_in init_in; struct fuse_init_out init_out; - struct cuse_init_in cuse_init_in; - struct cuse_init_out cuse_init_out; struct { struct fuse_read_in in; u64 attr_ver; @@ -396,9 +386,6 @@ struct fuse_conn { /** Filesystem supports NFS exporting. Only set in INIT */ unsigned export_support:1; - /** Set if bdi is valid */ - unsigned bdi_initialized:1; - /* * The following bitfields are only for optimization purposes * and hence races in setting them will not cause malfunction @@ -528,24 +515,25 @@ void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req, * Initialize READ or READDIR request */ void fuse_read_fill(struct fuse_req *req, struct file *file, - loff_t pos, size_t count, int opcode); + struct inode *inode, loff_t pos, size_t count, int opcode); /** * Send OPEN or OPENDIR request */ -int fuse_open_common(struct inode *inode, struct file *file, bool isdir); +int fuse_open_common(struct inode *inode, struct file *file, int isdir); struct fuse_file *fuse_file_alloc(struct fuse_conn *fc); -struct fuse_file *fuse_file_get(struct fuse_file *ff); void fuse_file_free(struct fuse_file *ff); -void fuse_finish_open(struct inode *inode, struct file *file); +void fuse_finish_open(struct inode *inode, struct file *file, + struct fuse_file *ff, struct fuse_open_out *outarg); -void fuse_sync_release(struct fuse_file *ff, int flags); +/** Fill in ff->reserved_req with a RELEASE request */ +void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode); /** * Send RELEASE or RELEASEDIR request */ -void fuse_release_common(struct file *file, int opcode); +int fuse_release_common(struct inode *inode, struct file *file, int isdir); /** * Send FSYNC or FSYNCDIR request @@ -664,12 +652,10 @@ void fuse_invalidate_entry_cache(struct dentry *entry); */ struct fuse_conn *fuse_conn_get(struct fuse_conn *fc); -void fuse_conn_kill(struct fuse_conn *fc); - /** * Initialize fuse_conn */ -void fuse_conn_init(struct fuse_conn *fc); +int fuse_conn_init(struct fuse_conn *fc, struct super_block *sb); /** * Release reference to fuse_conn @@ -708,13 +694,4 @@ void fuse_release_nowrite(struct inode *inode); u64 fuse_get_attr_version(struct fuse_conn *fc); -int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file, - bool isdir); -ssize_t fuse_direct_io(struct file *file, const char __user *buf, - size_t count, loff_t *ppos, int write); -long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, - unsigned int flags); -unsigned fuse_file_poll(struct file *file, poll_table *wait); -int fuse_dev_release(struct inode *inode, struct file *file); - #endif /* _FS_FUSE_I_H */ diff --git a/trunk/fs/fuse/inode.c b/trunk/fs/fuse/inode.c index f0df55a52929..91f7c85f1ffd 100644 --- a/trunk/fs/fuse/inode.c +++ b/trunk/fs/fuse/inode.c @@ -277,14 
+277,11 @@ static void fuse_send_destroy(struct fuse_conn *fc) } } -static void fuse_bdi_destroy(struct fuse_conn *fc) +static void fuse_put_super(struct super_block *sb) { - if (fc->bdi_initialized) - bdi_destroy(&fc->bdi); -} + struct fuse_conn *fc = get_fuse_conn_super(sb); -void fuse_conn_kill(struct fuse_conn *fc) -{ + fuse_send_destroy(fc); spin_lock(&fc->lock); fc->connected = 0; fc->blocked = 0; @@ -298,16 +295,7 @@ void fuse_conn_kill(struct fuse_conn *fc) list_del(&fc->entry); fuse_ctl_remove_conn(fc); mutex_unlock(&fuse_mutex); - fuse_bdi_destroy(fc); -} -EXPORT_SYMBOL_GPL(fuse_conn_kill); - -static void fuse_put_super(struct super_block *sb) -{ - struct fuse_conn *fc = get_fuse_conn_super(sb); - - fuse_send_destroy(fc); - fuse_conn_kill(fc); + bdi_destroy(&fc->bdi); fuse_conn_put(fc); } @@ -478,8 +466,10 @@ static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt) return 0; } -void fuse_conn_init(struct fuse_conn *fc) +int fuse_conn_init(struct fuse_conn *fc, struct super_block *sb) { + int err; + memset(fc, 0, sizeof(*fc)); spin_lock_init(&fc->lock); mutex_init(&fc->inst_mutex); @@ -494,12 +484,49 @@ void fuse_conn_init(struct fuse_conn *fc) INIT_LIST_HEAD(&fc->bg_queue); INIT_LIST_HEAD(&fc->entry); atomic_set(&fc->num_waiting, 0); + fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; + fc->bdi.unplug_io_fn = default_unplug_io_fn; + /* fuse does it's own writeback accounting */ + fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB; fc->khctr = 0; fc->polled_files = RB_ROOT; + fc->dev = sb->s_dev; + err = bdi_init(&fc->bdi); + if (err) + goto error_mutex_destroy; + if (sb->s_bdev) { + err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk", + MAJOR(fc->dev), MINOR(fc->dev)); + } else { + err = bdi_register_dev(&fc->bdi, fc->dev); + } + if (err) + goto error_bdi_destroy; + /* + * For a single fuse filesystem use max 1% of dirty + + * writeback threshold. + * + * This gives about 1M of write buffer for memory maps on a + * machine with 1G and 10% dirty_ratio, which should be more + * than enough. + * + * Privileged users can raise it by writing to + * + * /sys/class/bdi/<bdi>/max_ratio + */ + bdi_set_max_ratio(&fc->bdi, 1); fc->reqctr = 0; fc->blocked = 1; fc->attr_version = 1; get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); + + return 0; + + error_bdi_destroy: + bdi_destroy(&fc->bdi); + error_mutex_destroy: + mutex_destroy(&fc->inst_mutex); + return err; } EXPORT_SYMBOL_GPL(fuse_conn_init); @@ -512,14 +539,12 @@ void fuse_conn_put(struct fuse_conn *fc) fc->release(fc); } } -EXPORT_SYMBOL_GPL(fuse_conn_put); struct fuse_conn *fuse_conn_get(struct fuse_conn *fc) { atomic_inc(&fc->count); return fc; } -EXPORT_SYMBOL_GPL(fuse_conn_get); static struct inode *fuse_get_root_inode(struct super_block *sb, unsigned mode) { @@ -772,48 +797,6 @@ static void fuse_free_conn(struct fuse_conn *fc) kfree(fc); } -static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb) -{ - int err; - - fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; - fc->bdi.unplug_io_fn = default_unplug_io_fn; - /* fuse does it's own writeback accounting */ - fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB; - - err = bdi_init(&fc->bdi); - if (err) - return err; - - fc->bdi_initialized = 1; - - if (sb->s_bdev) { - err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk", - MAJOR(fc->dev), MINOR(fc->dev)); - } else { - err = bdi_register_dev(&fc->bdi, fc->dev); - } - - if (err) - return err; - - /* - * For a single fuse filesystem use max 1% of dirty + - * writeback threshold.
- * - * This gives about 1M of write buffer for memory maps on a - * machine with 1G and 10% dirty_ratio, which should be more - * than enough. - * - * Privileged users can raise it by writing to - * - * /sys/class/bdi/<bdi>/max_ratio - */ - bdi_set_max_ratio(&fc->bdi, 1); - - return 0; -} - static int fuse_fill_super(struct super_block *sb, void *data, int silent) { struct fuse_conn *fc; @@ -860,12 +843,11 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) if (!fc) goto err_fput; - fuse_conn_init(fc); - - fc->dev = sb->s_dev; - err = fuse_bdi_init(fc, sb); - if (err) - goto err_put_conn; + err = fuse_conn_init(fc, sb); + if (err) { + kfree(fc); + goto err_fput; + } fc->release = fuse_free_conn; fc->flags = d.flags; @@ -929,7 +911,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) err_put_root: dput(root_dentry); err_put_conn: - fuse_bdi_destroy(fc); + bdi_destroy(&fc->bdi); fuse_conn_put(fc); err_fput: fput(file); diff --git a/trunk/fs/gfs2/Makefile b/trunk/fs/gfs2/Makefile index 3da2f1f4f738..d53a9bea1c2f 100644 --- a/trunk/fs/gfs2/Makefile +++ b/trunk/fs/gfs2/Makefile @@ -1,4 +1,3 @@ -EXTRA_CFLAGS := -I$(src) obj-$(CONFIG_GFS2_FS) += gfs2.o gfs2-y := acl.o bmap.o dir.o eaops.o eattr.o glock.o \ glops.o inode.o log.o lops.o main.o meta_io.o \ diff --git a/trunk/fs/gfs2/bmap.c b/trunk/fs/gfs2/bmap.c index 6d47379e794b..329763530dc0 100644 --- a/trunk/fs/gfs2/bmap.c +++ b/trunk/fs/gfs2/bmap.c @@ -25,7 +25,6 @@ #include "trans.h" #include "dir.h" #include "util.h" -#include "trace_gfs2.h" /* This doesn't need to be that large as max 64 bit pointers in a 4k * block is 512, so __u16 is fine for that. It saves stack space to @@ -590,7 +589,6 @@ int gfs2_block_map(struct inode *inode, sector_t lblock, clear_buffer_mapped(bh_map); clear_buffer_new(bh_map); clear_buffer_boundary(bh_map); - trace_gfs2_bmap(ip, bh_map, lblock, create, 1); if (gfs2_is_dir(ip)) { bsize = sdp->sd_jbsize; arr = sdp->sd_jheightsize; @@ -625,7 +623,6 @@ int gfs2_block_map(struct inode *inode, sector_t lblock, ret = 0; out: release_metapath(&mp); - trace_gfs2_bmap(ip, bh_map, lblock, create, ret); bmap_unlock(ip, create); return ret; diff --git a/trunk/fs/gfs2/glock.c b/trunk/fs/gfs2/glock.c index 297421c0427a..2bf62bcc5181 100644 --- a/trunk/fs/gfs2/glock.c +++ b/trunk/fs/gfs2/glock.c @@ -39,8 +39,6 @@ #include "super.h" #include "util.h" #include "bmap.h" -#define CREATE_TRACE_POINTS -#include "trace_gfs2.h" struct gfs2_gl_hash_bucket { struct hlist_head hb_list; @@ -157,7 +155,7 @@ static void glock_free(struct gfs2_glock *gl) if (aspace) gfs2_aspace_put(aspace); - trace_gfs2_glock_put(gl); + sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl); } @@ -319,17 +317,14 @@ __acquires(&gl->gl_spin) return 2; gh->gh_error = ret; list_del_init(&gh->gh_list); - trace_gfs2_glock_queue(gh, 0); gfs2_holder_wake(gh); goto restart; } set_bit(HIF_HOLDER, &gh->gh_iflags); - trace_gfs2_promote(gh, 1); gfs2_holder_wake(gh); goto restart; } set_bit(HIF_HOLDER, &gh->gh_iflags); - trace_gfs2_promote(gh, 0); gfs2_holder_wake(gh); continue; } @@ -359,7 +354,6 @@ static inline void do_error(struct gfs2_glock *gl, const int ret) else continue; list_del_init(&gh->gh_list); - trace_gfs2_glock_queue(gh, 0); gfs2_holder_wake(gh); } } @@ -428,7 +422,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) int rv; spin_lock(&gl->gl_spin); - trace_gfs2_glock_state_change(gl, state); state_change(gl, state); gh = find_first_waiter(gl); @@ -858,7 +851,6 @@ static void
handle_callback(struct gfs2_glock *gl, unsigned int state, gl->gl_demote_state != state) { gl->gl_demote_state = LM_ST_UNLOCKED; } - trace_gfs2_demote_rq(gl); } /** @@ -944,7 +936,6 @@ __acquires(&gl->gl_spin) goto do_cancel; return; } - trace_gfs2_glock_queue(gh, 1); list_add_tail(&gh->gh_list, insert_pt); do_cancel: gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); @@ -1041,7 +1032,6 @@ void gfs2_glock_dq(struct gfs2_holder *gh) !test_bit(GLF_DEMOTE, &gl->gl_flags)) fast_path = 1; } - trace_gfs2_glock_queue(gh, 0); spin_unlock(&gl->gl_spin); if (likely(fast_path)) return; diff --git a/trunk/fs/gfs2/log.c b/trunk/fs/gfs2/log.c index 13c6237c5f67..f2e449c595b4 100644 --- a/trunk/fs/gfs2/log.c +++ b/trunk/fs/gfs2/log.c @@ -28,7 +28,6 @@ #include "meta_io.h" #include "util.h" #include "dir.h" -#include "trace_gfs2.h" #define PULL 1 @@ -314,7 +313,6 @@ int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks) gfs2_log_lock(sdp); } atomic_sub(blks, &sdp->sd_log_blks_free); - trace_gfs2_log_blocks(sdp, -blks); gfs2_log_unlock(sdp); mutex_unlock(&sdp->sd_log_reserve_mutex); @@ -335,7 +333,6 @@ void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks) gfs2_log_lock(sdp); atomic_add(blks, &sdp->sd_log_blks_free); - trace_gfs2_log_blocks(sdp, blks); gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks); gfs2_log_unlock(sdp); @@ -561,7 +558,6 @@ static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail) gfs2_log_lock(sdp); atomic_add(dist, &sdp->sd_log_blks_free); - trace_gfs2_log_blocks(sdp, dist); gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks); gfs2_log_unlock(sdp); @@ -719,7 +715,6 @@ void __gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl) up_write(&sdp->sd_log_flush_lock); return; } - trace_gfs2_log_flush(sdp, 1); ai = kzalloc(sizeof(struct gfs2_ail), GFP_NOFS | __GFP_NOFAIL); INIT_LIST_HEAD(&ai->ai_ail1_list); @@ -751,7 +746,6 @@ void __gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl) else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){ gfs2_log_lock(sdp); atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */ - trace_gfs2_log_blocks(sdp, -1); gfs2_log_unlock(sdp); log_write_header(sdp, 0, PULL); } @@ -769,7 +763,7 @@ void __gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl) ai = NULL; } gfs2_log_unlock(sdp); - trace_gfs2_log_flush(sdp, 0); + up_write(&sdp->sd_log_flush_lock); kfree(ai); @@ -793,7 +787,6 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) gfs2_assert_withdraw(sdp, sdp->sd_log_blks_reserved + tr->tr_reserved >= reserved); unused = sdp->sd_log_blks_reserved - reserved + tr->tr_reserved; atomic_add(unused, &sdp->sd_log_blks_free); - trace_gfs2_log_blocks(sdp, unused); gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= sdp->sd_jdesc->jd_blocks); sdp->sd_log_blks_reserved = reserved; diff --git a/trunk/fs/gfs2/lops.c b/trunk/fs/gfs2/lops.c index 9969ff062c5b..00315f50fa46 100644 --- a/trunk/fs/gfs2/lops.c +++ b/trunk/fs/gfs2/lops.c @@ -27,7 +27,6 @@ #include "rgrp.h" #include "trans.h" #include "util.h" -#include "trace_gfs2.h" /** * gfs2_pin - Pin a buffer in memory @@ -54,7 +53,6 @@ static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh) if (bd->bd_ail) list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list); get_bh(bh); - trace_gfs2_pin(bd, 1); } /** @@ -91,7 +89,6 @@ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh, bd->bd_ail = ai; 
list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list); clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); - trace_gfs2_pin(bd, 0); gfs2_log_unlock(sdp); unlock_buffer(bh); } diff --git a/trunk/fs/gfs2/ops_fstype.c b/trunk/fs/gfs2/ops_fstype.c index 7bc3c45cd676..cc34f271b3e7 100644 --- a/trunk/fs/gfs2/ops_fstype.c +++ b/trunk/fs/gfs2/ops_fstype.c @@ -33,7 +33,6 @@ #include "log.h" #include "quota.h" #include "dir.h" -#include "trace_gfs2.h" #define DO 0 #define UNDO 1 @@ -776,7 +775,6 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) /* Map the extents for this journal's blocks */ map_journal_extents(sdp); } - trace_gfs2_log_blocks(sdp, atomic_read(&sdp->sd_log_blks_free)); if (sdp->sd_lockstruct.ls_first) { unsigned int x; diff --git a/trunk/fs/gfs2/rgrp.c b/trunk/fs/gfs2/rgrp.c index daa4ae341a29..de3239731db8 100644 --- a/trunk/fs/gfs2/rgrp.c +++ b/trunk/fs/gfs2/rgrp.c @@ -29,7 +29,6 @@ #include "util.h" #include "log.h" #include "inode.h" -#include "trace_gfs2.h" #define BFITNOENT ((u32)~0) #define NO_BLOCK ((u64)~0) @@ -1520,7 +1519,7 @@ int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n) spin_lock(&sdp->sd_rindex_spin); rgd->rd_free_clone -= *n; spin_unlock(&sdp->sd_rindex_spin); - trace_gfs2_block_alloc(ip, block, *n, GFS2_BLKST_USED); + *bn = block; return 0; @@ -1572,7 +1571,7 @@ u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation) spin_lock(&sdp->sd_rindex_spin); rgd->rd_free_clone--; spin_unlock(&sdp->sd_rindex_spin); - trace_gfs2_block_alloc(dip, block, 1, GFS2_BLKST_DINODE); + return block; } @@ -1592,7 +1591,7 @@ void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen) rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE); if (!rgd) return; - trace_gfs2_block_alloc(ip, bstart, blen, GFS2_BLKST_FREE); + rgd->rd_free += blen; gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); @@ -1620,7 +1619,7 @@ void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen) rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE); if (!rgd) return; - trace_gfs2_block_alloc(ip, bstart, blen, GFS2_BLKST_FREE); + rgd->rd_free += blen; gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); @@ -1643,7 +1642,6 @@ void gfs2_unlink_di(struct inode *inode) rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED); if (!rgd) return; - trace_gfs2_block_alloc(ip, blkno, 1, GFS2_BLKST_UNLINKED); gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); gfs2_trans_add_rg(rgd); @@ -1675,7 +1673,6 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno) void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) { gfs2_free_uninit_di(rgd, ip->i_no_addr); - trace_gfs2_block_alloc(ip, ip->i_no_addr, 1, GFS2_BLKST_FREE); gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid); gfs2_meta_wipe(ip, ip->i_no_addr, 1); } diff --git a/trunk/fs/gfs2/super.c b/trunk/fs/gfs2/super.c index 0a6801336470..c8930b31cdf0 100644 --- a/trunk/fs/gfs2/super.c +++ b/trunk/fs/gfs2/super.c @@ -719,6 +719,8 @@ static void gfs2_put_super(struct super_block *sb) int error; struct gfs2_jdesc *jd; + lock_kernel(); + /* Unfreeze the filesystem, if we need to */ mutex_lock(&sdp->sd_freeze_lock); @@ -785,6 +787,8 @@ static void gfs2_put_super(struct super_block *sb) /* At this point, we're through participating in the lockspace */ gfs2_sys_fs_del(sdp); + + unlock_kernel(); } /** diff --git a/trunk/fs/gfs2/trace_gfs2.h b/trunk/fs/gfs2/trace_gfs2.h deleted file mode 100644 index 98d6ef1c1dc0..000000000000 --- a/trunk/fs/gfs2/trace_gfs2.h 
+++ /dev/null @@ -1,407 +0,0 @@ -#if !defined(_TRACE_GFS2_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_GFS2_H - -#include <linux/tracepoint.h> - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM gfs2 -#define TRACE_INCLUDE_FILE trace_gfs2 - -#include <linux/fs.h> -#include <linux/buffer_head.h> -#include <linux/dlmconstants.h> -#include <linux/gfs2_ondisk.h> -#include "incore.h" -#include "glock.h" - -#define dlm_state_name(nn) { DLM_LOCK_##nn, #nn } -#define glock_trace_name(x) __print_symbolic(x, \ - dlm_state_name(IV), \ - dlm_state_name(NL), \ - dlm_state_name(CR), \ - dlm_state_name(CW), \ - dlm_state_name(PR), \ - dlm_state_name(PW), \ - dlm_state_name(EX)) - -#define block_state_name(x) __print_symbolic(x, \ - { GFS2_BLKST_FREE, "free" }, \ - { GFS2_BLKST_USED, "used" }, \ - { GFS2_BLKST_DINODE, "dinode" }, \ - { GFS2_BLKST_UNLINKED, "unlinked" }) - -#define show_glock_flags(flags) __print_flags(flags, "", \ - {(1UL << GLF_LOCK), "l" }, \ - {(1UL << GLF_DEMOTE), "D" }, \ - {(1UL << GLF_PENDING_DEMOTE), "d" }, \ - {(1UL << GLF_DEMOTE_IN_PROGRESS), "p" }, \ - {(1UL << GLF_DIRTY), "y" }, \ - {(1UL << GLF_LFLUSH), "f" }, \ - {(1UL << GLF_INVALIDATE_IN_PROGRESS), "i" }, \ - {(1UL << GLF_REPLY_PENDING), "r" }, \ - {(1UL << GLF_INITIAL), "I" }, \ - {(1UL << GLF_FROZEN), "F" }) - -#ifndef NUMPTY -#define NUMPTY -static inline u8 glock_trace_state(unsigned int state) -{ - switch(state) { - case LM_ST_SHARED: - return DLM_LOCK_PR; - case LM_ST_DEFERRED: - return DLM_LOCK_CW; - case LM_ST_EXCLUSIVE: - return DLM_LOCK_EX; - } - return DLM_LOCK_NL; -} -#endif - -/* Section 1 - Locking - * - * Objectives: - * Latency: Remote demote request to state change - * Latency: Local lock request to state change - * Latency: State change to lock grant - * Correctness: Ordering of local lock state vs. I/O requests - * Correctness: Responses to remote demote requests - */ - -/* General glock state change (DLM lock request completes) */ -TRACE_EVENT(gfs2_glock_state_change, - - TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state), - - TP_ARGS(gl, new_state), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( u64, glnum ) - __field( u32, gltype ) - __field( u8, cur_state ) - __field( u8, new_state ) - __field( u8, dmt_state ) - __field( u8, tgt_state ) - __field( unsigned long, flags ) - ), - - TP_fast_assign( - __entry->dev = gl->gl_sbd->sd_vfs->s_dev; - __entry->glnum = gl->gl_name.ln_number; - __entry->gltype = gl->gl_name.ln_type; - __entry->cur_state = glock_trace_state(gl->gl_state); - __entry->new_state = glock_trace_state(new_state); - __entry->tgt_state = glock_trace_state(gl->gl_target); - __entry->dmt_state = glock_trace_state(gl->gl_demote_state); - __entry->flags = gl->gl_flags; - ), - - TP_printk("%u,%u glock %d:%lld state %s to %s tgt:%s dmt:%s flags:%s", - MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype, - (unsigned long long)__entry->glnum, - glock_trace_name(__entry->cur_state), - glock_trace_name(__entry->new_state), - glock_trace_name(__entry->tgt_state), - glock_trace_name(__entry->dmt_state), - show_glock_flags(__entry->flags)) -); - -/* State change -> unlocked, glock is being deallocated */ -TRACE_EVENT(gfs2_glock_put, - - TP_PROTO(const struct gfs2_glock *gl), - - TP_ARGS(gl), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( u64, glnum ) - __field( u32, gltype ) - __field( u8, cur_state ) - __field( unsigned long, flags ) - ), - - TP_fast_assign( - __entry->dev = gl->gl_sbd->sd_vfs->s_dev; - __entry->gltype = gl->gl_name.ln_type; - __entry->glnum = gl->gl_name.ln_number; - __entry->cur_state = glock_trace_state(gl->gl_state); - __entry->flags =
gl->gl_flags; - ), - - TP_printk("%u,%u glock %d:%lld state %s => %s flags:%s", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->gltype, (unsigned long long)__entry->glnum, - glock_trace_name(__entry->cur_state), - glock_trace_name(DLM_LOCK_IV), - show_glock_flags(__entry->flags)) - -); - -/* Callback (local or remote) requesting lock demotion */ -TRACE_EVENT(gfs2_demote_rq, - - TP_PROTO(const struct gfs2_glock *gl), - - TP_ARGS(gl), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( u64, glnum ) - __field( u32, gltype ) - __field( u8, cur_state ) - __field( u8, dmt_state ) - __field( unsigned long, flags ) - ), - - TP_fast_assign( - __entry->dev = gl->gl_sbd->sd_vfs->s_dev; - __entry->gltype = gl->gl_name.ln_type; - __entry->glnum = gl->gl_name.ln_number; - __entry->cur_state = glock_trace_state(gl->gl_state); - __entry->dmt_state = glock_trace_state(gl->gl_demote_state); - __entry->flags = gl->gl_flags; - ), - - TP_printk("%u,%u glock %d:%lld demote %s to %s flags:%s", - MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype, - (unsigned long long)__entry->glnum, - glock_trace_name(__entry->cur_state), - glock_trace_name(__entry->dmt_state), - show_glock_flags(__entry->flags)) - -); - -/* Promotion/grant of a glock */ -TRACE_EVENT(gfs2_promote, - - TP_PROTO(const struct gfs2_holder *gh, int first), - - TP_ARGS(gh, first), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( u64, glnum ) - __field( u32, gltype ) - __field( int, first ) - __field( u8, state ) - ), - - TP_fast_assign( - __entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev; - __entry->glnum = gh->gh_gl->gl_name.ln_number; - __entry->gltype = gh->gh_gl->gl_name.ln_type; - __entry->first = first; - __entry->state = glock_trace_state(gh->gh_state); - ), - - TP_printk("%u,%u glock %u:%llu promote %s %s", - MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype, - (unsigned long long)__entry->glnum, - __entry->first ? "first": "other", - glock_trace_name(__entry->state)) -); - -/* Queue/dequeue a lock request */ -TRACE_EVENT(gfs2_glock_queue, - - TP_PROTO(const struct gfs2_holder *gh, int queue), - - TP_ARGS(gh, queue), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( u64, glnum ) - __field( u32, gltype ) - __field( int, queue ) - __field( u8, state ) - ), - - TP_fast_assign( - __entry->dev = gh->gh_gl->gl_sbd->sd_vfs->s_dev; - __entry->glnum = gh->gh_gl->gl_name.ln_number; - __entry->gltype = gh->gh_gl->gl_name.ln_type; - __entry->queue = queue; - __entry->state = glock_trace_state(gh->gh_state); - ), - - TP_printk("%u,%u glock %u:%llu %squeue %s", - MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype, - (unsigned long long)__entry->glnum, - __entry->queue ? "" : "de", - glock_trace_name(__entry->state)) -); - -/* Section 2 - Log/journal - * - * Objectives: - * Latency: Log flush time - * Correctness: pin/unpin vs. disk I/O ordering - * Performance: Log usage stats - */ - -/* Pin/unpin a block in the log */ -TRACE_EVENT(gfs2_pin, - - TP_PROTO(const struct gfs2_bufdata *bd, int pin), - - TP_ARGS(bd, pin), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( int, pin ) - __field( u32, len ) - __field( sector_t, block ) - __field( u64, ino ) - ), - - TP_fast_assign( - __entry->dev = bd->bd_gl->gl_sbd->sd_vfs->s_dev; - __entry->pin = pin; - __entry->len = bd->bd_bh->b_size; - __entry->block = bd->bd_bh->b_blocknr; - __entry->ino = bd->bd_gl->gl_name.ln_number; - ), - - TP_printk("%u,%u log %s %llu/%lu inode %llu", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->pin ? 
"pin" : "unpin", - (unsigned long long)__entry->block, - (unsigned long)__entry->len, - (unsigned long long)__entry->ino) -); - -/* Flushing the log */ -TRACE_EVENT(gfs2_log_flush, - - TP_PROTO(const struct gfs2_sbd *sdp, int start), - - TP_ARGS(sdp, start), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( int, start ) - __field( u64, log_seq ) - ), - - TP_fast_assign( - __entry->dev = sdp->sd_vfs->s_dev; - __entry->start = start; - __entry->log_seq = sdp->sd_log_sequence; - ), - - TP_printk("%u,%u log flush %s %llu", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->start ? "start" : "end", - (unsigned long long)__entry->log_seq) -); - -/* Reserving/releasing blocks in the log */ -TRACE_EVENT(gfs2_log_blocks, - - TP_PROTO(const struct gfs2_sbd *sdp, int blocks), - - TP_ARGS(sdp, blocks), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( int, blocks ) - ), - - TP_fast_assign( - __entry->dev = sdp->sd_vfs->s_dev; - __entry->blocks = blocks; - ), - - TP_printk("%u,%u log reserve %d", MAJOR(__entry->dev), - MINOR(__entry->dev), __entry->blocks) -); - -/* Section 3 - bmap - * - * Objectives: - * Latency: Bmap request time - * Performance: Block allocator tracing - * Correctness: Test of disard generation vs. blocks allocated - */ - -/* Map an extent of blocks, possibly a new allocation */ -TRACE_EVENT(gfs2_bmap, - - TP_PROTO(const struct gfs2_inode *ip, const struct buffer_head *bh, - sector_t lblock, int create, int errno), - - TP_ARGS(ip, bh, lblock, create, errno), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( sector_t, lblock ) - __field( sector_t, pblock ) - __field( u64, inum ) - __field( unsigned long, state ) - __field( u32, len ) - __field( int, create ) - __field( int, errno ) - ), - - TP_fast_assign( - __entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev; - __entry->lblock = lblock; - __entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0; - __entry->inum = ip->i_no_addr; - __entry->state = bh->b_state; - __entry->len = bh->b_size; - __entry->create = create; - __entry->errno = errno; - ), - - TP_printk("%u,%u bmap %llu map %llu/%lu to %llu flags:%08lx %s %d", - MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long long)__entry->inum, - (unsigned long long)__entry->lblock, - (unsigned long)__entry->len, - (unsigned long long)__entry->pblock, - __entry->state, __entry->create ? "create " : "nocreate", - __entry->errno) -); - -/* Keep track of blocks as they are allocated/freed */ -TRACE_EVENT(gfs2_block_alloc, - - TP_PROTO(const struct gfs2_inode *ip, u64 block, unsigned len, - u8 block_state), - - TP_ARGS(ip, block, len, block_state), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( u64, start ) - __field( u64, inum ) - __field( u32, len ) - __field( u8, block_state ) - ), - - TP_fast_assign( - __entry->dev = ip->i_gl->gl_sbd->sd_vfs->s_dev; - __entry->start = block; - __entry->inum = ip->i_no_addr; - __entry->len = len; - __entry->block_state = block_state; - ), - - TP_printk("%u,%u bmap %llu alloc %llu/%lu %s", - MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long long)__entry->inum, - (unsigned long long)__entry->start, - (unsigned long)__entry->len, - block_state_name(__entry->block_state)) -); - -#endif /* _TRACE_GFS2_H */ - -/* This part must be outside protection */ -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . 
-#include <trace/define_trace.h> - diff --git a/trunk/fs/partitions/check.c b/trunk/fs/partitions/check.c index 1a9c7878f864..0af36085eb28 100644 --- a/trunk/fs/partitions/check.c +++ b/trunk/fs/partitions/check.c @@ -556,49 +556,27 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev) /* add partitions */ for (p = 1; p < state->limit; p++) { - sector_t size, from; -try_scan: - size = state->parts[p].size; + sector_t size = state->parts[p].size; + sector_t from = state->parts[p].from; if (!size) continue; - - from = state->parts[p].from; if (from >= get_capacity(disk)) { printk(KERN_WARNING "%s: p%d ignored, start %llu is behind the end of the disk\n", disk->disk_name, p, (unsigned long long) from); continue; } - if (from + size > get_capacity(disk)) { - struct block_device_operations *bdops = disk->fops; - unsigned long long capacity; - + /* + * we can not ignore partitions of broken tables + * created by for example camera firmware, but we + * limit them to the end of the disk to avoid + * creating invalid block devices + */ printk(KERN_WARNING - "%s: p%d size %llu exceeds device capacity, ", + "%s: p%d size %llu limited to end of disk\n", disk->disk_name, p, (unsigned long long) size); - - if (bdops->set_capacity && - (disk->flags & GENHD_FL_NATIVE_CAPACITY) == 0) { - printk(KERN_CONT "enabling native capacity\n"); - capacity = bdops->set_capacity(disk, ~0ULL); - disk->flags |= GENHD_FL_NATIVE_CAPACITY; - if (capacity > get_capacity(disk)) { - set_capacity(disk, capacity); - check_disk_size_change(disk, bdev); - bdev->bd_invalidated = 0; - } - goto try_scan; - } else { - /* - * we can not ignore partitions of broken tables - * created by for example camera firmware, but - * we limit them to the end of the disk to avoid - * creating invalid block devices - */ - printk(KERN_CONT "limited to end of disk\n"); - size = get_capacity(disk) - from; - } + size = get_capacity(disk) - from; } part = add_partition(disk, p, from, size, state->parts[p].flags); diff --git a/trunk/include/linux/blkdev.h b/trunk/include/linux/blkdev.h index 0b1a6cae9de1..ebdfde8fe556 100644 --- a/trunk/include/linux/blkdev.h +++ b/trunk/include/linux/blkdev.h @@ -1226,8 +1226,6 @@ struct block_device_operations { int (*direct_access) (struct block_device *, sector_t, void **, unsigned long *); int (*media_changed) (struct gendisk *); - unsigned long long (*set_capacity) (struct gendisk *, - unsigned long long); int (*revalidate_disk) (struct gendisk *); int (*getgeo)(struct block_device *, struct hd_geometry *); struct module *owner; diff --git a/trunk/include/linux/compiler.h b/trunk/include/linux/compiler.h index 04fb5135b4e1..37bcb50a4d7c 100644 --- a/trunk/include/linux/compiler.h +++ b/trunk/include/linux/compiler.h @@ -261,11 +261,6 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); # define __section(S) __attribute__ ((__section__(#S))) #endif -/* Are two types/vars the same type (ignoring qualifiers)? */ -#ifndef __same_type -# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) -#endif - /* * Prevent the compiler from merging or refetching accesses.
The compiler * is also forbidden from reordering successive instances of ACCESS_ONCE(), diff --git a/trunk/include/linux/fuse.h b/trunk/include/linux/fuse.h index d41ed593f79f..162e5defe683 100644 --- a/trunk/include/linux/fuse.h +++ b/trunk/include/linux/fuse.h @@ -120,13 +120,6 @@ struct fuse_file_lock { #define FUSE_EXPORT_SUPPORT (1 << 4) #define FUSE_BIG_WRITES (1 << 5) -/** - * CUSE INIT request/reply flags - * - * CUSE_UNRESTRICTED_IOCTL: use unrestricted ioctl - */ -#define CUSE_UNRESTRICTED_IOCTL (1 << 0) - /** * Release flags */ @@ -217,9 +210,6 @@ enum fuse_opcode { FUSE_DESTROY = 38, FUSE_IOCTL = 39, FUSE_POLL = 40, - - /* CUSE specific operations */ - CUSE_INIT = 4096, }; enum fuse_notify_code { @@ -411,27 +401,6 @@ struct fuse_init_out { __u32 max_write; }; -#define CUSE_INIT_INFO_MAX 4096 - -struct cuse_init_in { - __u32 major; - __u32 minor; - __u32 unused; - __u32 flags; -}; - -struct cuse_init_out { - __u32 major; - __u32 minor; - __u32 unused; - __u32 flags; - __u32 max_read; - __u32 max_write; - __u32 dev_major; /* chardev major */ - __u32 dev_minor; /* chardev minor */ - __u32 spare[10]; -}; - struct fuse_interrupt_in { __u64 unique; }; diff --git a/trunk/include/linux/genhd.h b/trunk/include/linux/genhd.h index 7cbd38d363a2..149fda264c86 100644 --- a/trunk/include/linux/genhd.h +++ b/trunk/include/linux/genhd.h @@ -114,7 +114,6 @@ struct hd_struct { #define GENHD_FL_UP 16 #define GENHD_FL_SUPPRESS_PARTITION_INFO 32 #define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ -#define GENHD_FL_NATIVE_CAPACITY 128 #define BLK_SCSI_MAX_CMDS (256) #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) diff --git a/trunk/include/linux/ide.h b/trunk/include/linux/ide.h index a6c6a2fad7c8..867cb68d8461 100644 --- a/trunk/include/linux/ide.h +++ b/trunk/include/linux/ide.h @@ -178,7 +178,7 @@ typedef u8 hwif_chipset_t; /* * Structure to hold all information about the location of this port */ -struct ide_hw { +typedef struct hw_regs_s { union { struct ide_io_ports io_ports; unsigned long io_ports_array[IDE_NR_PORTS]; @@ -186,11 +186,12 @@ struct ide_hw { int irq; /* our irq number */ ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ + hwif_chipset_t chipset; struct device *dev, *parent; unsigned long config; -}; +} hw_regs_t; -static inline void ide_std_init_ports(struct ide_hw *hw, +static inline void ide_std_init_ports(hw_regs_t *hw, unsigned long io_addr, unsigned long ctl_addr) { @@ -217,12 +218,21 @@ static inline void ide_std_init_ports(struct ide_hw *hw, /* * Special Driver Flags + * + * set_geometry : respecify drive geometry + * recalibrate : seek to cyl 0 + * set_multmode : set multmode count + * reserved : unused */ -enum { - IDE_SFLAG_SET_GEOMETRY = (1 << 0), - IDE_SFLAG_RECALIBRATE = (1 << 1), - IDE_SFLAG_SET_MULTMODE = (1 << 2), -}; +typedef union { + unsigned all : 8; + struct { + unsigned set_geometry : 1; + unsigned recalibrate : 1; + unsigned set_multmode : 1; + unsigned reserved : 5; + } b; +} special_t; /* * Status returned from various ide_ functions @@ -381,7 +391,6 @@ struct ide_drive_s; struct ide_disk_ops { int (*check)(struct ide_drive_s *, const char *); int (*get_capacity)(struct ide_drive_s *); - u64 (*set_capacity)(struct ide_drive_s *, u64); void (*setup)(struct ide_drive_s *); void (*flush)(struct ide_drive_s *); int (*init_media)(struct ide_drive_s *, struct gendisk *); @@ -459,8 +468,6 @@ enum { IDE_DFLAG_NICE1 = (1 << 5), /* device is physically present */ IDE_DFLAG_PRESENT = (1 << 6), - /* disable Host Protected Area */ - 
IDE_DFLAG_NOHPA = (1 << 7), /* id read from device (synthetic if not set) */ IDE_DFLAG_ID_READ = (1 << 8), IDE_DFLAG_NOPROBE = (1 << 9), @@ -499,7 +506,6 @@ enum { /* write protect */ IDE_DFLAG_WP = (1 << 29), IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30), - IDE_DFLAG_NIEN_QUIRK = (1 << 31), }; struct ide_drive_s { @@ -524,13 +530,14 @@ struct ide_drive_s { unsigned long sleep; /* sleep until this time */ unsigned long timeout; /* max time to wait for irq */ - u8 special_flags; /* special action flags */ + special_t special; /* special action flags */ u8 select; /* basic drive/head select reg value */ u8 retry_pio; /* retrying dma capable host in pio */ u8 waiting_for_dma; /* dma currently in progress */ u8 dma; /* atapi dma flag */ + u8 quirk_list; /* considered quirky, set for a specific host */ u8 init_speed; /* transfer rate set at boot */ u8 current_speed; /* current transfer rate set */ u8 desired_speed; /* desired transfer rate set */ @@ -555,7 +562,8 @@ struct ide_drive_s { unsigned int drive_data; /* used by set_pio_mode/dev_select() */ unsigned int failures; /* current failure count */ unsigned int max_failures; /* maximum allowed failure count */ - u64 probed_capacity;/* initial/native media capacity */ + u64 probed_capacity;/* initial reported media capacity (ide-cd only currently) */ + u64 capacity64; /* total number of sectors */ int lun; /* logical unit */ @@ -1214,7 +1222,7 @@ static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev) } void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, - struct ide_hw *, struct ide_hw **); + hw_regs_t *, hw_regs_t **); void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); #ifdef CONFIG_BLK_DEV_IDEDMA_PCI @@ -1453,18 +1461,16 @@ static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {} void ide_register_region(struct gendisk *); void ide_unregister_region(struct gendisk *); -void ide_check_nien_quirk_list(ide_drive_t *); void ide_undecoded_slave(ide_drive_t *); void ide_port_apply_params(ide_hwif_t *); int ide_sysfs_register_port(ide_hwif_t *); -struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **, - unsigned int); +struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **); void ide_host_free(struct ide_host *); int ide_host_register(struct ide_host *, const struct ide_port_info *, - struct ide_hw **); -int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int, + hw_regs_t **); +int ide_host_add(const struct ide_port_info *, hw_regs_t **, struct ide_host **); void ide_host_remove(struct ide_host *); int ide_legacy_device_add(const struct ide_port_info *, unsigned long); diff --git a/trunk/include/linux/if_ether.h b/trunk/include/linux/if_ether.h index 60e8934d10b5..cfe4fe1b7132 100644 --- a/trunk/include/linux/if_ether.h +++ b/trunk/include/linux/if_ether.h @@ -79,7 +79,6 @@ #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ #define ETH_P_TIPC 0x88CA /* TIPC */ #define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */ -#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */ #define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ /* diff --git a/trunk/include/linux/lguest.h b/trunk/include/linux/lguest.h index 7bc1440fc473..175e63f4a8c0 100644 --- a/trunk/include/linux/lguest.h +++ b/trunk/include/linux/lguest.h @@ -30,10 +30,6 @@ struct lguest_data /* Wallclock time set by the Host. */ struct timespec time; - /* Interrupt pending set by the Host. 
The Guest should do a hypercall - * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). */ - int irq_pending; - /* Async hypercall ring. Instead of directly making hypercalls, we can * place them in here for processing the next time the Host wants. * This batching can be quite efficient. */ diff --git a/trunk/include/linux/lguest_launcher.h b/trunk/include/linux/lguest_launcher.h index bfefbdf7498a..a53407a4165c 100644 --- a/trunk/include/linux/lguest_launcher.h +++ b/trunk/include/linux/lguest_launcher.h @@ -57,8 +57,7 @@ enum lguest_req LHREQ_INITIALIZE, /* + base, pfnlimit, start */ LHREQ_GETDMA, /* No longer used */ LHREQ_IRQ, /* + irq */ - LHREQ_BREAK, /* No longer used */ - LHREQ_EVENTFD, /* + address, fd. */ + LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */ }; /* The alignment to use between consumer and producer parts of vring. diff --git a/trunk/include/linux/module.h b/trunk/include/linux/module.h index a7bc6e7b43a7..a8f2c0aa4c32 100644 --- a/trunk/include/linux/module.h +++ b/trunk/include/linux/module.h @@ -77,7 +77,6 @@ search_extable(const struct exception_table_entry *first, void sort_extable(struct exception_table_entry *start, struct exception_table_entry *finish); void sort_main_extable(void); -void trim_init_extable(struct module *m); #ifdef MODULE #define MODULE_GENERIC_TABLE(gtype,name) \ diff --git a/trunk/include/linux/moduleparam.h b/trunk/include/linux/moduleparam.h index 6547c3cdbc4c..a4f0b931846c 100644 --- a/trunk/include/linux/moduleparam.h +++ b/trunk/include/linux/moduleparam.h @@ -36,14 +36,9 @@ typedef int (*param_set_fn)(const char *val, struct kernel_param *kp); /* Returns length written or -errno. Buffer is 4k (ie. be short!) */ typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp); -/* Flag bits for kernel_param.flags */ -#define KPARAM_KMALLOCED 1 -#define KPARAM_ISBOOL 2 - struct kernel_param { const char *name; - u16 perm; - u16 flags; + unsigned int perm; param_set_fn set; param_get_fn get; union { @@ -84,7 +79,7 @@ struct kparam_array parameters. perm sets the visibility in sysfs: 000 means it's not there, read bits mean it's readable, write bits mean it's writable. */ -#define __module_param_call(prefix, name, set, get, arg, isbool, perm) \ +#define __module_param_call(prefix, name, set, get, arg, perm) \ /* Default value instead of permissions? */ \ static int __param_perm_check_##name __attribute__((unused)) = \ BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)) \ @@ -93,13 +88,10 @@ struct kparam_array static struct kernel_param __moduleparam_const __param_##name \ __used \ __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ - = { __param_str_##name, perm, isbool ? KPARAM_ISBOOL : 0, \ - set, get, { arg } } + = { __param_str_##name, perm, set, get, { arg } } #define module_param_call(name, set, get, arg, perm) \ - __module_param_call(MODULE_PARAM_PREFIX, \ - name, set, get, arg, \ - __same_type(*(arg), bool), perm) + __module_param_call(MODULE_PARAM_PREFIX, name, set, get, arg, perm) /* Helper functions: type is byte, short, ushort, int, uint, long, ulong, charp, bool or invbool, or XXX if you define param_get_XXX, @@ -128,16 +120,15 @@ struct kparam_array #define core_param(name, var, type, perm) \ param_check_##type(name, &(var)); \ __module_param_call("", name, param_set_##type, param_get_##type, \ - &var, __same_type(var, bool), perm) + &var, perm) #endif /* !MODULE */ /* Actually copy string: maxlen param is usually sizeof(string). 
*/ #define module_param_string(name, string, len, perm) \ static const struct kparam_string __param_string_##name \ = { len, string }; \ - __module_param_call(MODULE_PARAM_PREFIX, name, \ - param_set_copystring, param_get_string, \ - .str = &__param_string_##name, 0, perm); \ + module_param_call(name, param_set_copystring, param_get_string, \ + .str = &__param_string_##name, perm); \ __MODULE_PARM_TYPE(name, "string") /* Called on module insert or kernel boot */ @@ -195,30 +186,21 @@ extern int param_set_charp(const char *val, struct kernel_param *kp); extern int param_get_charp(char *buffer, struct kernel_param *kp); #define param_check_charp(name, p) __param_check(name, p, char *) -/* For historical reasons "bool" parameters can be (unsigned) "int". */ extern int param_set_bool(const char *val, struct kernel_param *kp); extern int param_get_bool(char *buffer, struct kernel_param *kp); -#define param_check_bool(name, p) \ - static inline void __check_##name(void) \ - { \ - BUILD_BUG_ON(!__same_type(*(p), bool) && \ - !__same_type(*(p), unsigned int) && \ - !__same_type(*(p), int)); \ - } +#define param_check_bool(name, p) __param_check(name, p, int) extern int param_set_invbool(const char *val, struct kernel_param *kp); extern int param_get_invbool(char *buffer, struct kernel_param *kp); -#define param_check_invbool(name, p) __param_check(name, p, bool) +#define param_check_invbool(name, p) __param_check(name, p, int) /* Comma-separated array: *nump is set to number they actually specified. */ #define module_param_array_named(name, array, type, nump, perm) \ static const struct kparam_array __param_arr_##name \ = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\ sizeof(array[0]), array }; \ - __module_param_call(MODULE_PARAM_PREFIX, name, \ - param_array_set, param_array_get, \ - .arr = &__param_arr_##name, \ - __same_type(array[0], bool), perm); \ + module_param_call(name, param_array_set, param_array_get, \ + .arr = &__param_arr_##name, perm); \ __MODULE_PARM_TYPE(name, "array of " #type) #define module_param_array(name, type, nump, perm) \ diff --git a/trunk/include/linux/page_cgroup.h b/trunk/include/linux/page_cgroup.h index 7339c7bf7331..13f126c89ae8 100644 --- a/trunk/include/linux/page_cgroup.h +++ b/trunk/include/linux/page_cgroup.h @@ -18,7 +18,19 @@ struct page_cgroup { }; void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat); -void __init page_cgroup_init(void); + +#ifdef CONFIG_SPARSEMEM +static inline void __init page_cgroup_init_flatmem(void) +{ +} +extern void __init page_cgroup_init(void); +#else +void __init page_cgroup_init_flatmem(void); +static inline void __init page_cgroup_init(void) +{ +} +#endif + struct page_cgroup *lookup_page_cgroup(struct page *page); enum { @@ -87,6 +99,10 @@ static inline void page_cgroup_init(void) { } +static inline void __init page_cgroup_init_flatmem(void) +{ +} + #endif #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP diff --git a/trunk/include/linux/virtio.h b/trunk/include/linux/virtio.h index 4fca4f5440ba..06005fa9e982 100644 --- a/trunk/include/linux/virtio.h +++ b/trunk/include/linux/virtio.h @@ -10,17 +10,14 @@ /** * virtqueue - a queue to register buffers for sending or receiving. - * @list: the chain of virtqueues for this device * @callback: the function to call when buffers are consumed (can be NULL). - * @name: the name of this virtqueue (mainly for debugging) * @vdev: the virtio device this queue was created for. * @vq_ops: the operations for this virtqueue (see below). 
* @priv: a pointer for the virtqueue implementation to use. */ -struct virtqueue { - struct list_head list; +struct virtqueue +{ void (*callback)(struct virtqueue *vq); - const char *name; struct virtio_device *vdev; struct virtqueue_ops *vq_ops; void *priv; @@ -79,16 +76,15 @@ struct virtqueue_ops { * @dev: underlying device. * @id: the device type identification (used to match it with a driver). * @config: the configuration ops for this device. - * @vqs: the list of virtqueues for this device. * @features: the features supported by both driver and device. * @priv: private pointer for the driver's use. */ -struct virtio_device { +struct virtio_device +{ int index; struct device dev; struct virtio_device_id id; struct virtio_config_ops *config; - struct list_head vqs; /* Note that this is a Linux set_bit-style bitmap. */ unsigned long features[1]; void *priv; @@ -103,7 +99,8 @@ void unregister_virtio_device(struct virtio_device *dev); * @id_table: the ids serviced by this driver. * @feature_table: an array of feature numbers supported by this device. * @feature_table_size: number of entries in the feature table array. - * @probe: the function to call when a device is found. Returns 0 or -errno. + * @probe: the function to call when a device is found. Returns a token for + * remove, or PTR_ERR(). * @remove: the function when a device is removed. * @config_changed: optional function to call when the device configuration * changes; may be called in interrupt context. diff --git a/trunk/include/linux/virtio_config.h b/trunk/include/linux/virtio_config.h index 99f514575f6a..bf8ec283b232 100644 --- a/trunk/include/linux/virtio_config.h +++ b/trunk/include/linux/virtio_config.h @@ -29,7 +29,6 @@ #define VIRTIO_F_NOTIFY_ON_EMPTY 24 #ifdef __KERNEL__ -#include <linux/err.h> #include <linux/virtio.h> /** @@ -50,26 +49,15 @@ * @set_status: write the status byte * vdev: the virtio_device * status: the new status byte - * @request_vqs: request the specified number of virtqueues - * vdev: the virtio_device - * max_vqs: the max number of virtqueues we want - * If supplied, must call before any virtqueues are instantiated. - * To modify the max number of virtqueues after request_vqs has been - * called, call free_vqs and then request_vqs with a new value. - * @free_vqs: cleanup resources allocated by request_vqs - * vdev: the virtio_device - * If supplied, must call after all virtqueues have been deleted. * @reset: reset the device * vdev: the virtio device * After this, status and feature negotiation must be done again - * @find_vqs: find virtqueues and instantiate them. + * @find_vq: find a virtqueue and instantiate it. * vdev: the virtio_device - * nvqs: the number of virtqueues to find - * vqs: on success, includes new virtqueues - * callbacks: array of callbacks, for each virtqueue - * names: array of virtqueue names (mainly for debugging) - * Returns 0 on success or error status - * @del_vqs: free virtqueues found by find_vqs(). + * index: the 0-based virtqueue number in case there's more than one. + * callback: the virqtueue callback + * Returns the new virtqueue or ERR_PTR() (eg. -ENOENT). + * @del_vq: free a virtqueue found by find_vq(). * @get_features: get the array of feature bits for this device. * vdev: the virtio_device * Returns the first 32 feature bits (all we currently need). @@ -78,7 +66,6 @@ * This gives the final feature bits for the device: it can change * the dev->feature bits if it wants.
*/ -typedef void vq_callback_t(struct virtqueue *); struct virtio_config_ops { void (*get)(struct virtio_device *vdev, unsigned offset, @@ -88,11 +75,10 @@ struct virtio_config_ops u8 (*get_status)(struct virtio_device *vdev); void (*set_status)(struct virtio_device *vdev, u8 status); void (*reset)(struct virtio_device *vdev); - int (*find_vqs)(struct virtio_device *, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char *names[]); - void (*del_vqs)(struct virtio_device *); + struct virtqueue *(*find_vq)(struct virtio_device *vdev, + unsigned index, + void (*callback)(struct virtqueue *)); + void (*del_vq)(struct virtqueue *vq); u32 (*get_features)(struct virtio_device *vdev); void (*finalize_features)(struct virtio_device *vdev); }; @@ -113,9 +99,7 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev, if (__builtin_constant_p(fbit)) BUILD_BUG_ON(fbit >= 32); - if (fbit < VIRTIO_TRANSPORT_F_START) - virtio_check_driver_offered_feature(vdev, fbit); - + virtio_check_driver_offered_feature(vdev, fbit); return test_bit(fbit, vdev->features); } @@ -142,18 +126,5 @@ static inline int virtio_config_buf(struct virtio_device *vdev, vdev->config->get(vdev, offset, buf, len); return 0; } - -static inline -struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, - vq_callback_t *c, const char *n) -{ - vq_callback_t *callbacks[] = { c }; - const char *names[] = { n }; - struct virtqueue *vq; - int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names); - if (err < 0) - return ERR_PTR(err); - return vq; -} #endif /* __KERNEL__ */ #endif /* _LINUX_VIRTIO_CONFIG_H */ diff --git a/trunk/include/linux/virtio_pci.h b/trunk/include/linux/virtio_pci.h index 9a3d7c48c622..cd0fd5d181a6 100644 --- a/trunk/include/linux/virtio_pci.h +++ b/trunk/include/linux/virtio_pci.h @@ -47,17 +47,9 @@ /* The bit of the ISR which indicates a device configuration change. */ #define VIRTIO_PCI_ISR_CONFIG 0x2 -/* MSI-X registers: only enabled if MSI-X is enabled. */ -/* A 16-bit vector for configuration changes. */ -#define VIRTIO_MSI_CONFIG_VECTOR 20 -/* A 16-bit vector for selected queue notifications. */ -#define VIRTIO_MSI_QUEUE_VECTOR 22 -/* Vector value used to disable MSI for queue */ -#define VIRTIO_MSI_NO_VECTOR 0xffff - /* The remaining space is defined by each driver as the per-driver * configuration space */ -#define VIRTIO_PCI_CONFIG(dev) ((dev)->msix_enabled ? 24 : 20) +#define VIRTIO_PCI_CONFIG 20 /* Virtio ABI version, this must match exactly */ #define VIRTIO_PCI_ABI_VERSION 0 diff --git a/trunk/include/linux/virtio_ring.h b/trunk/include/linux/virtio_ring.h index 693e0ec5afa6..71e03722fb59 100644 --- a/trunk/include/linux/virtio_ring.h +++ b/trunk/include/linux/virtio_ring.h @@ -14,8 +14,6 @@ #define VRING_DESC_F_NEXT 1 /* This marks a buffer as write-only (otherwise read-only). */ #define VRING_DESC_F_WRITE 2 -/* This means the buffer contains a list of buffer descriptors. */ -#define VRING_DESC_F_INDIRECT 4 /* The Host uses this in used->flags to advise the Guest: don't kick me when * you add a buffer. It's unreliable, so it's simply an optimization. Guest @@ -26,9 +24,6 @@ * optimization. */ #define VRING_AVAIL_F_NO_INTERRUPT 1 -/* We support indirect buffer descriptors */ -#define VIRTIO_RING_F_INDIRECT_DESC 28 - /* Virtio ring descriptors: 16 bytes. These can chain together via "next". 
*/ struct vring_desc { @@ -124,8 +119,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, struct virtio_device *vdev, void *pages, void (*notify)(struct virtqueue *vq), - void (*callback)(struct virtqueue *vq), - const char *name); + void (*callback)(struct virtqueue *vq)); void vring_del_virtqueue(struct virtqueue *vq); /* Filter out transport-specific feature bits. */ void vring_transport_features(struct virtio_device *vdev); diff --git a/trunk/include/scsi/fc/fc_fip.h b/trunk/include/scsi/fc/fc_fip.h index 3d138c1fcf8a..0627a9ae6347 100644 --- a/trunk/include/scsi/fc/fc_fip.h +++ b/trunk/include/scsi/fc/fc_fip.h @@ -22,6 +22,13 @@ * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf */ +/* + * The FIP ethertype eventually goes in net/if_ether.h. + */ +#ifndef ETH_P_FIP +#define ETH_P_FIP 0x8914 /* FIP Ethertype */ +#endif + #define FIP_DEF_PRI 128 /* default selection priority */ #define FIP_DEF_FC_MAP 0x0efc00 /* default FCoE MAP (MAC OUI) value */ #define FIP_DEF_FKA 8000 /* default FCF keep-alive/advert period (mS) */ diff --git a/trunk/include/scsi/iscsi_if.h b/trunk/include/scsi/iscsi_if.h index 4426f00da5ff..d0ed5226f8c4 100644 --- a/trunk/include/scsi/iscsi_if.h +++ b/trunk/include/scsi/iscsi_if.h @@ -22,11 +22,6 @@ #define ISCSI_IF_H #include <scsi/iscsi_proto.h> -#include <linux/in.h> -#include <linux/in6.h> - -#define ISCSI_NL_GRP_ISCSID 1 -#define ISCSI_NL_GRP_UIP 2 #define UEVENT_BASE 10 #define KEVENT_BASE 100 @@ -55,10 +50,7 @@ enum iscsi_uevent_e { ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15, ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16, ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17, - ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18, - ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST = UEVENT_BASE + 19, - - ISCSI_UEVENT_PATH_UPDATE = UEVENT_BASE + 20, + ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18, /* up events */ ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1, @@ -67,9 +59,6 @@ enum iscsi_uevent_e { ISCSI_KEVENT_DESTROY_SESSION = KEVENT_BASE + 4, ISCSI_KEVENT_UNBIND_SESSION = KEVENT_BASE + 5, ISCSI_KEVENT_CREATE_SESSION = KEVENT_BASE + 6, - - ISCSI_KEVENT_PATH_REQ = KEVENT_BASE + 7, - ISCSI_KEVENT_IF_DOWN = KEVENT_BASE + 8, }; enum iscsi_tgt_dscvr { @@ -142,10 +131,6 @@ struct iscsi_uevent { struct msg_transport_connect { uint32_t non_blocking; } ep_connect; - struct msg_transport_connect_through_host { - uint32_t host_no; - uint32_t non_blocking; - } ep_connect_through_host; struct msg_transport_poll { uint64_t ep_handle; uint32_t timeout_ms; @@ -169,9 +154,6 @@ struct iscsi_uevent { uint32_t param; /* enum iscsi_host_param */ uint32_t len; } set_host_param; - struct msg_set_path { - uint32_t host_no; - } set_path; } u; union { /* messages k -> u */ @@ -205,38 +187,9 @@ struct iscsi_uevent { struct msg_transport_connect_ret { uint64_t handle; } ep_connect_ret; - struct msg_req_path { - uint32_t host_no; - } req_path; - struct msg_notify_if_down { - uint32_t host_no; - } notify_if_down; } r; } __attribute__ ((aligned (sizeof(uint64_t)))); -/* - * To keep the struct iscsi_uevent size the same for userspace code - * compatibility, the main structure for ISCSI_UEVENT_PATH_UPDATE and - * ISCSI_KEVENT_PATH_REQ is defined separately and comes after the - * struct iscsi_uevent in the NETLINK_ISCSI message.
diff --git a/trunk/include/scsi/fc/fc_fip.h b/trunk/include/scsi/fc/fc_fip.h
index 3d138c1fcf8a..0627a9ae6347 100644
--- a/trunk/include/scsi/fc/fc_fip.h
+++ b/trunk/include/scsi/fc/fc_fip.h
@@ -22,6 +22,13 @@
  * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf
  */
 
+/*
+ * The FIP ethertype eventually goes in net/if_ether.h.
+ */
+#ifndef ETH_P_FIP
+#define ETH_P_FIP	0x8914	/* FIP Ethertype */
+#endif
+
 #define FIP_DEF_PRI	128	/* default selection priority */
 #define FIP_DEF_FC_MAP	0x0efc00 /* default FCoE MAP (MAC OUI) value */
 #define FIP_DEF_FKA	8000	/* default FCF keep-alive/advert period (mS) */
diff --git a/trunk/include/scsi/iscsi_if.h b/trunk/include/scsi/iscsi_if.h
index 4426f00da5ff..d0ed5226f8c4 100644
--- a/trunk/include/scsi/iscsi_if.h
+++ b/trunk/include/scsi/iscsi_if.h
@@ -22,11 +22,6 @@
 #define ISCSI_IF_H
 
 #include <scsi/iscsi_proto.h>
-#include <linux/in.h>
-#include <linux/in6.h>
-
-#define ISCSI_NL_GRP_ISCSID	1
-#define ISCSI_NL_GRP_UIP	2
 
 #define UEVENT_BASE			10
 #define KEVENT_BASE			100
@@ -55,10 +50,7 @@ enum iscsi_uevent_e {
 	ISCSI_UEVENT_TGT_DSCVR		= UEVENT_BASE + 15,
 	ISCSI_UEVENT_SET_HOST_PARAM	= UEVENT_BASE + 16,
 	ISCSI_UEVENT_UNBIND_SESSION	= UEVENT_BASE + 17,
-	ISCSI_UEVENT_CREATE_BOUND_SESSION		= UEVENT_BASE + 18,
-	ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST	= UEVENT_BASE + 19,
-
-	ISCSI_UEVENT_PATH_UPDATE	= UEVENT_BASE + 20,
+	ISCSI_UEVENT_CREATE_BOUND_SESSION	= UEVENT_BASE + 18,
 
 	/* up events */
 	ISCSI_KEVENT_RECV_PDU		= KEVENT_BASE + 1,
@@ -67,9 +59,6 @@ enum iscsi_uevent_e {
 	ISCSI_KEVENT_DESTROY_SESSION	= KEVENT_BASE + 4,
 	ISCSI_KEVENT_UNBIND_SESSION	= KEVENT_BASE + 5,
 	ISCSI_KEVENT_CREATE_SESSION	= KEVENT_BASE + 6,
-
-	ISCSI_KEVENT_PATH_REQ		= KEVENT_BASE + 7,
-	ISCSI_KEVENT_IF_DOWN		= KEVENT_BASE + 8,
 };
 
 enum iscsi_tgt_dscvr {
@@ -142,10 +131,6 @@ struct iscsi_uevent {
 		struct msg_transport_connect {
 			uint32_t	non_blocking;
 		} ep_connect;
-		struct msg_transport_connect_through_host {
-			uint32_t	host_no;
-			uint32_t	non_blocking;
-		} ep_connect_through_host;
 		struct msg_transport_poll {
 			uint64_t	ep_handle;
 			uint32_t	timeout_ms;
@@ -169,9 +154,6 @@ struct iscsi_uevent {
 			uint32_t	param; /* enum iscsi_host_param */
 			uint32_t	len;
 		} set_host_param;
-		struct msg_set_path {
-			uint32_t	host_no;
-		} set_path;
 	} u;
 	union {
 		/* messages k -> u */
@@ -205,38 +187,9 @@ struct iscsi_uevent {
 		struct msg_transport_connect_ret {
 			uint64_t	handle;
 		} ep_connect_ret;
-		struct msg_req_path {
-			uint32_t	host_no;
-		} req_path;
-		struct msg_notify_if_down {
-			uint32_t	host_no;
-		} notify_if_down;
 	} r;
 } __attribute__ ((aligned (sizeof(uint64_t))));
 
-/*
- * To keep the struct iscsi_uevent size the same for userspace code
- * compatibility, the main structure for ISCSI_UEVENT_PATH_UPDATE and
- * ISCSI_KEVENT_PATH_REQ is defined separately and comes after the
- * struct iscsi_uevent in the NETLINK_ISCSI message.
- */
-struct iscsi_path {
-	uint64_t	handle;
-	uint8_t		mac_addr[6];
-	uint8_t		mac_addr_old[6];
-	uint32_t	ip_addr_len;	/* 4 or 16 */
-	union {
-		struct in_addr	v4_addr;
-		struct in6_addr	v6_addr;
-	} src;
-	union {
-		struct in_addr	v4_addr;
-		struct in6_addr	v6_addr;
-	} dst;
-	uint16_t	vlan_id;
-	uint16_t	pmtu;
-} __attribute__ ((aligned (sizeof(uint64_t))));
-
 /*
  * Common error codes
  */
diff --git a/trunk/include/scsi/libfc.h b/trunk/include/scsi/libfc.h
index ebdd9f4cf070..45f9cc642c46 100644
--- a/trunk/include/scsi/libfc.h
+++ b/trunk/include/scsi/libfc.h
@@ -679,7 +679,6 @@ struct fc_lport {
 	unsigned int		e_d_tov;
 	unsigned int		r_a_tov;
 	u8			max_retry_count;
-	u8			max_rport_retry_count;
 	u16			link_speed;
 	u16			link_supported_speeds;
 	u16			lro_xid;	/* max xid for fcoe lro */
diff --git a/trunk/include/scsi/libiscsi.h b/trunk/include/scsi/libiscsi.h
index 196525cd402f..0289f5745fb9 100644
--- a/trunk/include/scsi/libiscsi.h
+++ b/trunk/include/scsi/libiscsi.h
@@ -82,12 +82,9 @@ enum {
 
 enum {
-	ISCSI_TASK_FREE,
 	ISCSI_TASK_COMPLETED,
 	ISCSI_TASK_PENDING,
 	ISCSI_TASK_RUNNING,
-	ISCSI_TASK_ABRT_TMF,		/* aborted due to TMF */
-	ISCSI_TASK_ABRT_SESS_RECOV,	/* aborted due to session recovery */
 };
 
 struct iscsi_r2t_info {
@@ -184,7 +181,9 @@ struct iscsi_conn {
 	/* xmit */
 	struct list_head	mgmtqueue;	/* mgmt (control) xmit queue */
-	struct list_head	cmdqueue;	/* data-path cmd queue */
+	struct list_head	mgmt_run_list;	/* list of control tasks */
+	struct list_head	xmitqueue;	/* data-path cmd queue */
+	struct list_head	run_list;	/* list of cmds in progress */
 	struct list_head	requeue;	/* tasks needing another run */
 	struct work_struct	xmitwork;	/* per-conn. xmit workqueue */
 	unsigned long		suspend_tx;	/* suspend Tx */
@@ -407,7 +406,6 @@ extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
 				char *, int);
 extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
 extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
-extern struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *, itt_t);
 extern void iscsi_requeue_task(struct iscsi_task *task);
 extern void iscsi_put_task(struct iscsi_task *task);
 extern void __iscsi_get_task(struct iscsi_task *task);
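The reverted iscsi_conn layout splits queued work from in-flight work: tasks wait on xmitqueue or mgmtqueue, then move to run_list or mgmt_run_list while the xmit worker owns them. Connection setup must initialize all of these heads; a sketch, assuming conn has just been zero-allocated (init_conn_lists is a hypothetical helper):

    #include <linux/list.h>
    #include <scsi/libiscsi.h>

    static void init_conn_lists(struct iscsi_conn *conn)
    {
            INIT_LIST_HEAD(&conn->mgmtqueue);
            INIT_LIST_HEAD(&conn->mgmt_run_list);
            INIT_LIST_HEAD(&conn->xmitqueue);
            INIT_LIST_HEAD(&conn->run_list);
            INIT_LIST_HEAD(&conn->requeue);
    }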
diff --git a/trunk/include/scsi/osd_attributes.h b/trunk/include/scsi/osd_attributes.h
index 56e920ade326..f888a6fda073 100644
--- a/trunk/include/scsi/osd_attributes.h
+++ b/trunk/include/scsi/osd_attributes.h
@@ -29,7 +29,6 @@ enum {
 	OSD_APAGE_PARTITION_INFORMATION = OSD_APAGE_PARTITION_FIRST + 1,
 	OSD_APAGE_PARTITION_QUOTAS	= OSD_APAGE_PARTITION_FIRST + 2,
 	OSD_APAGE_PARTITION_TIMESTAMP	= OSD_APAGE_PARTITION_FIRST + 3,
-	OSD_APAGE_PARTITION_ATTR_ACCESS	= OSD_APAGE_PARTITION_FIRST + 4,
 	OSD_APAGE_PARTITION_SECURITY	= OSD_APAGE_PARTITION_FIRST + 5,
 	OSD_APAGE_PARTITION_LAST	= 0x5FFFFFFF,
 
@@ -52,9 +51,7 @@ enum {
 	OSD_APAGE_RESERVED_TYPE_LAST	= 0xEFFFFFFF,
 
 	OSD_APAGE_COMMON_FIRST		= 0xF0000000,
-	OSD_APAGE_COMMON_LAST		= 0xFFFFFFFD,
-
-	OSD_APAGE_CURRENT_COMMAND	= 0xFFFFFFFE,
+	OSD_APAGE_COMMON_LAST		= 0xFFFFFFFE,
 
 	OSD_APAGE_REQUEST_ALL		= 0xFFFFFFFF,
 };
@@ -109,30 +106,10 @@ enum {
 	OSD_ATTR_RI_PRODUCT_REVISION_LEVEL = 0x7,	/* 4 */
 	OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER = 0x8,	/* variable */
 	OSD_ATTR_RI_OSD_NAME		= 0x9,		/* variable */
-	OSD_ATTR_RI_MAX_CDB_CONTINUATION_LEN = 0xA,	/* 4 */
 	OSD_ATTR_RI_TOTAL_CAPACITY	= 0x80,		/* 8 */
 	OSD_ATTR_RI_USED_CAPACITY	= 0x81,		/* 8 */
 	OSD_ATTR_RI_NUMBER_OF_PARTITIONS = 0xC0,	/* 8 */
 	OSD_ATTR_RI_CLOCK		= 0x100,	/* 6 */
-	OARI_DEFAULT_ISOLATION_METHOD	= 0X110,	/* 1 */
-	OARI_SUPPORTED_ISOLATION_METHODS = 0X111,	/* 32 */
-
-	OARI_DATA_ATOMICITY_GUARANTEE	= 0X120,	/* 8 */
-	OARI_DATA_ATOMICITY_ALIGNMENT	= 0X121,	/* 8 */
-	OARI_ATTRIBUTES_ATOMICITY_GUARANTEE = 0X122,	/* 8 */
-	OARI_DATA_ATTRIBUTES_ATOMICITY_MULTIPLIER = 0X123, /* 1 */
-
-	OARI_MAXIMUM_SNAPSHOTS_COUNT	= 0X1C1,	/* 0 or 4 */
-	OARI_MAXIMUM_CLONES_COUNT	= 0X1C2,	/* 0 or 4 */
-	OARI_MAXIMUM_BRANCH_DEPTH	= 0X1CC,	/* 0 or 4 */
-	OARI_SUPPORTED_OBJECT_DUPLICATION_METHOD_FIRST = 0X200, /* 0 or 4 */
-	OARI_SUPPORTED_OBJECT_DUPLICATION_METHOD_LAST = 0X2ff, /* 0 or 4 */
-	OARI_SUPPORTED_TIME_OF_DUPLICATION_METHOD_FIRST = 0X300, /* 0 or 4 */
-	OARI_SUPPORTED_TIME_OF_DUPLICATION_METHOD_LAST = 0X30F, /* 0 or 4 */
-	OARI_SUPPORT_FOR_DUPLICATED_OBJECT_FREEZING = 0X310, /* 0 or 4 */
-	OARI_SUPPORT_FOR_SNAPSHOT_REFRESHING = 0X311,	/* 0 or 1 */
-	OARI_SUPPORTED_CDB_CONTINUATION_DESC_TYPE_FIRST = 0X7000001, /* 0 or 4 */
-	OARI_SUPPORTED_CDB_CONTINUATION_DESC_TYPE_LAST	= 0X700FFFF, /* 0 or 4 */
 };
 /* Root_Information_attributes_page does not have a get_page structure */
 
@@ -143,15 +120,7 @@ enum {
 	OSD_ATTR_PI_PARTITION_ID	= 0x1,		/* 8 */
 	OSD_ATTR_PI_USERNAME		= 0x9,		/* variable */
 	OSD_ATTR_PI_USED_CAPACITY	= 0x81,		/* 8 */
-	OSD_ATTR_PI_USED_CAPACITY_INCREMENT = 0x84,	/* 0 or 8 */
 	OSD_ATTR_PI_NUMBER_OF_OBJECTS	= 0xC1,		/* 8 */
-
-	OSD_ATTR_PI_ACTUAL_DATA_SPACE	= 0xD1,		/* 0 or 8 */
-	OSD_ATTR_PI_RESERVED_DATA_SPACE	= 0xD2,		/* 0 or 8 */
-	OSD_ATTR_PI_DEFAULT_SNAPSHOT_DUPLICATION_METHOD = 0x200, /* 0 or 4 */
-	OSD_ATTR_PI_DEFAULT_CLONE_DUPLICATION_METHOD = 0x201, /* 0 or 4 */
-	OSD_ATTR_PI_DEFAULT_SP_TIME_OF_DUPLICATION = 0x300, /* 0 or 4 */
-	OSD_ATTR_PI_DEFAULT_CLONE_TIME_OF_DUPLICATION = 0x301, /* 0 or 4 */
 };
 /* Partition Information attributes page does not have a get_page structure */
 
@@ -162,7 +131,6 @@ enum {
 	OSD_ATTR_CI_PARTITION_ID	= 0x1,		/* 8 */
 	OSD_ATTR_CI_COLLECTION_OBJECT_ID = 0x2,		/* 8 */
 	OSD_ATTR_CI_USERNAME		= 0x9,		/* variable */
-	OSD_ATTR_CI_COLLECTION_TYPE	= 0xA,		/* 1 */
 	OSD_ATTR_CI_USED_CAPACITY	= 0x81,		/* 8 */
 };
 /* Collection Information attributes page does not have a get_page structure */
 
@@ -176,8 +144,6 @@ enum {
 	OSD_ATTR_OI_USERNAME		= 0x9,		/* variable */
 	OSD_ATTR_OI_USED_CAPACITY	= 0x81,		/* 8 */
 	OSD_ATTR_OI_LOGICAL_LENGTH	= 0x82,		/* 8 */
-	SD_ATTR_OI_ACTUAL_DATA_SPACE	= 0XD1,		/* 0 OR 8 */
-	SD_ATTR_OI_RESERVED_DATA_SPACE	= 0XD2,		/* 0 OR 8 */
 };
 /* Object Information attributes page does not have a get_page structure */
 
@@ -282,18 +248,7 @@ struct object_timestamps_attributes_page {
 	struct osd_timestamp data_modified_time;
 } __packed;
-/* OSD2r05: 7.1.3.19 Attributes Access attributes page
- * (OSD_APAGE_PARTITION_ATTR_ACCESS)
- *
- * each attribute is of the form below. Total array length is deduced
- * from the attribute's length
- * (See allowed_attributes_access of the struct osd_cap_object_descriptor)
- */
-struct attributes_access_attr {
-	struct osd_attributes_list_attrid attr_list[0];
-} __packed;
-
-/* OSD2r05: 7.1.2.21 Collections attributes page */
+/* 7.1.2.19 Collections attributes page */
 /* TBD */
 
 /* 7.1.2.20 Root Policy/Security attributes page (OSD_APAGE_ROOT_SECURITY) */
@@ -369,29 +324,4 @@ struct object_security_attributes_page {
 	__be32 policy_access_tag;
 } __packed;
 
-/* OSD2r05: 7.1.3.31 Current Command attributes page
- * (OSD_APAGE_CURRENT_COMMAND)
- */
-enum {
-	OSD_ATTR_CC_RESPONSE_INTEGRITY_CHECK_VALUE = 0x1, /* 32 */
-	OSD_ATTR_CC_OBJECT_TYPE		= 0x2,	/* 1 */
-	OSD_ATTR_CC_PARTITION_ID	= 0x3,	/* 8 */
-	OSD_ATTR_CC_OBJECT_ID		= 0x4,	/* 8 */
-	OSD_ATTR_CC_STARTING_BYTE_ADDRESS_OF_APPEND = 0x5, /* 8 */
-	OSD_ATTR_CC_CHANGE_IN_USED_CAPACITY = 0x6, /* 8 */
-};
-
-/*TBD: osdv1_current_command_attributes_page */
-
-struct osdv2_current_command_attributes_page {
-	struct osd_attr_page_header hdr; /* id=0xFFFFFFFE, size=0x44 */
-	u8 response_integrity_check_value[OSD_CRYPTO_KEYID_SIZE];
-	u8 object_type;
-	u8 reserved[3];
-	__be64 partition_id;
-	__be64 object_id;
-	__be64 starting_byte_address_of_append;
-	__be64 change_in_used_capacity;
-};
-
 #endif /*ndef __OSD_ATTRIBUTES_H__*/
diff --git a/trunk/include/scsi/osd_initiator.h b/trunk/include/scsi/osd_initiator.h
index 02bd9f716357..b24d9616eb46 100644
--- a/trunk/include/scsi/osd_initiator.h
+++ b/trunk/include/scsi/osd_initiator.h
@@ -18,7 +18,6 @@
 #include "osd_types.h"
 
 #include <linux/blkdev.h>
-#include <scsi/scsi_device.h>
 
 /* Note: "NI" in comments below means "Not Implemented yet" */
 
@@ -48,7 +47,6 @@ enum osd_std_version {
  */
 struct osd_dev {
 	struct scsi_device *scsi_device;
-	struct file *file;
 	unsigned def_timeout;
 
 #ifdef OSD_VER1_SUPPORT
@@ -71,10 +69,6 @@ void osd_dev_fini(struct osd_dev *od);
 
 /* some hi level device operations */
 int osd_auto_detect_ver(struct osd_dev *od, void *caps);	/* GFP_KERNEL */
-static inline struct request_queue *osd_request_queue(struct osd_dev *od)
-{
-	return od->scsi_device->request_queue;
-}
 
 /* we might want to use function vector in the future */
 static inline void osd_dev_set_ver(struct osd_dev *od, enum osd_std_version v)
@@ -369,9 +363,7 @@ void osd_req_create_object(struct osd_request *or, struct osd_obj_id *);
 void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *);
 
 void osd_req_write(struct osd_request *or,
-	const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
-int osd_req_write_kern(struct osd_request *or,
-	const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
+	const struct osd_obj_id *, struct bio *data_out, u64 offset);
 void osd_req_append(struct osd_request *or,
 	const struct osd_obj_id *, struct bio *data_out);/* NI */
 void osd_req_create_write(struct osd_request *or,
@@ -386,9 +378,7 @@ void osd_req_flush_object(struct osd_request *or,
 	/*V2*/ u64 offset, /*V2*/ u64 len);
 
 void osd_req_read(struct osd_request *or,
-	const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
-int osd_req_read_kern(struct osd_request *or,
-	const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
+	const struct osd_obj_id *, struct bio *data_in, u64 offset);
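With the *_kern variants removed, callers of osd_req_read()/osd_req_write() must supply a ready-built bio; there is no kernel-buffer convenience path. A sketch of a write under the restored signature (queue_object_write is a hypothetical wrapper; building or, obj and bio is the caller's job):

    #include <scsi/osd_initiator.h>

    static void queue_object_write(struct osd_request *or,
                                   const struct osd_obj_id *obj,
                                   struct bio *bio, u64 offset)
    {
            /* data_out bio first, byte offset last, per the header above */
            osd_req_write(or, obj, bio, offset);
    }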
diff --git a/trunk/include/scsi/osd_protocol.h b/trunk/include/scsi/osd_protocol.h
index 2cc8e8b1cc19..62b2ab8c69d4 100644
--- a/trunk/include/scsi/osd_protocol.h
+++ b/trunk/include/scsi/osd_protocol.h
@@ -303,15 +303,7 @@ enum osd_service_actions {
 	OSD_ACT_V2(REMOVE_MEMBER_OBJECTS,	0x21)
 	OSD_ACT_V2(GET_MEMBER_ATTRIBUTES,	0x22)
 	OSD_ACT_V2(SET_MEMBER_ATTRIBUTES,	0x23)
-
-	OSD_ACT_V2(CREATE_CLONE,		0x28)
-	OSD_ACT_V2(CREATE_SNAPSHOT,		0x29)
-	OSD_ACT_V2(DETACH_CLONE,		0x2A)
-	OSD_ACT_V2(REFRESH_SNAPSHOT_CLONE,	0x2B)
-	OSD_ACT_V2(RESTORE_PARTITION_FROM_SNAPSHOT, 0x2C)
-	OSD_ACT_V2(READ_MAP,			0x31)
-	OSD_ACT_V2(READ_MAPS_COMPARE,		0x32)
 
 	OSD_ACT_V1_V2(PERFORM_SCSI_COMMAND,	0x8F7E, 0x8F7C)
 	OSD_ACT_V1_V2(SCSI_TASK_MANAGEMENT,	0x8F7F, 0x8F7D)
diff --git a/trunk/include/scsi/scsi_transport_iscsi.h b/trunk/include/scsi/scsi_transport_iscsi.h
index 349c7f30720d..457588e1119b 100644
--- a/trunk/include/scsi/scsi_transport_iscsi.h
+++ b/trunk/include/scsi/scsi_transport_iscsi.h
@@ -126,14 +126,12 @@ struct iscsi_transport {
 			 int *index, int *age);
 
 	void (*session_recovery_timedout) (struct iscsi_cls_session *session);
-	struct iscsi_endpoint *(*ep_connect) (struct Scsi_Host *shost,
-					      struct sockaddr *dst_addr,
+	struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
 					      int non_blocking);
 	int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
 	void (*ep_disconnect) (struct iscsi_endpoint *ep);
 	int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
 			  uint32_t enable, struct sockaddr *dst_addr);
-	int (*set_path) (struct Scsi_Host *shost, struct iscsi_path *params);
 };
 
 /*
@@ -150,10 +148,6 @@ extern void iscsi_conn_error_event(struct iscsi_cls_conn *conn,
 extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
 			  char *data, uint32_t data_size);
 
-extern int iscsi_offload_mesg(struct Scsi_Host *shost,
-			      struct iscsi_transport *transport, uint32_t type,
-			      char *data, uint16_t data_size);
-
 struct iscsi_cls_conn {
 	struct list_head conn_list;	/* item in connlist */
 	void *dd_data;			/* LLD private data */
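Transports correspondingly lose the Scsi_Host argument in their connect hook, and the set_path callback altogether. A sketch of a transport declaration against the reverted ops table (my_iscsi and my_ep_connect are hypothetical; a real .ep_connect would resolve dst_addr and return a connected endpoint):

    #include <linux/err.h>
    #include <scsi/scsi_transport_iscsi.h>

    static struct iscsi_endpoint *my_ep_connect(struct sockaddr *dst_addr,
                                                int non_blocking)
    {
            return ERR_PTR(-ENOSYS);        /* sketch only */
    }

    static struct iscsi_transport my_transport = {
            .name       = "my_iscsi",
            .ep_connect = my_ep_connect,
            /* no .set_path after this revert */
    };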
diff --git a/trunk/init/main.c b/trunk/init/main.c
index 5616661eac01..b3e8f14c568a 100644
--- a/trunk/init/main.c
+++ b/trunk/init/main.c
@@ -539,6 +539,11 @@ void __init __weak thread_info_cache_init(void)
  */
 static void __init mm_init(void)
 {
+	/*
+	 * page_cgroup requires contiguous pages as memmap,
+	 * and it's bigger than MAX_ORDER unless SPARSEMEM.
+	 */
+	page_cgroup_init_flatmem();
 	mem_init();
 	kmem_cache_init();
 	vmalloc_init();
diff --git a/trunk/kernel/module.c b/trunk/kernel/module.c
index e4ab36ce7672..35f7de00bf0d 100644
--- a/trunk/kernel/module.c
+++ b/trunk/kernel/module.c
@@ -2455,7 +2455,6 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
 	mutex_lock(&module_mutex);
 	/* Drop initial reference. */
 	module_put(mod);
-	trim_init_extable(mod);
 	module_free(mod, mod->module_init);
 	mod->module_init = NULL;
 	mod->init_size = 0;
diff --git a/trunk/kernel/params.c b/trunk/kernel/params.c
index 7f6912ced2ba..de273ec85bd2 100644
--- a/trunk/kernel/params.c
+++ b/trunk/kernel/params.c
@@ -24,6 +24,9 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 
+/* We abuse the high bits of "perm" to record whether we kmalloc'ed. */
+#define KPARAM_KMALLOCED	0x80000000
+
 #if 0
 #define DEBUGP printk
 #else
@@ -217,13 +220,13 @@ int param_set_charp(const char *val, struct kernel_param *kp)
 		return -ENOSPC;
 	}
 
-	if (kp->flags & KPARAM_KMALLOCED)
+	if (kp->perm & KPARAM_KMALLOCED)
 		kfree(*(char **)kp->arg);
 
 	/* This is a hack.  We can't strdup in early boot, and we
 	 * don't need to; this mangled commandline is preserved. */
 	if (slab_is_available()) {
-		kp->flags |= KPARAM_KMALLOCED;
+		kp->perm |= KPARAM_KMALLOCED;
 		*(char **)kp->arg = kstrdup(val, GFP_KERNEL);
 		if (!kp->arg)
 			return -ENOMEM;
@@ -238,63 +241,44 @@ int param_get_charp(char *buffer, struct kernel_param *kp)
 	return sprintf(buffer, "%s", *((char **)kp->arg));
 }
 
-/* Actually could be a bool or an int, for historical reasons. */
 int param_set_bool(const char *val, struct kernel_param *kp)
 {
-	bool v;
-
 	/* No equals means "set"... */
 	if (!val) val = "1";
 
 	/* One of =[yYnN01] */
 	switch (val[0]) {
 	case 'y': case 'Y': case '1':
-		v = true;
-		break;
+		*(int *)kp->arg = 1;
+		return 0;
 	case 'n': case 'N': case '0':
-		v = false;
-		break;
-	default:
-		return -EINVAL;
+		*(int *)kp->arg = 0;
+		return 0;
 	}
-
-	if (kp->flags & KPARAM_ISBOOL)
-		*(bool *)kp->arg = v;
-	else
-		*(int *)kp->arg = v;
-	return 0;
+	return -EINVAL;
 }
 
 int param_get_bool(char *buffer, struct kernel_param *kp)
 {
-	bool val;
-	if (kp->flags & KPARAM_ISBOOL)
-		val = *(bool *)kp->arg;
-	else
-		val = *(int *)kp->arg;
-
 	/* Y and N chosen as being relatively non-coder friendly */
-	return sprintf(buffer, "%c", val ? 'Y' : 'N');
+	return sprintf(buffer, "%c", (*(int *)kp->arg) ? 'Y' : 'N');
 }
 
-/* This one must be bool. */
 int param_set_invbool(const char *val, struct kernel_param *kp)
 {
-	int ret;
-	bool boolval;
+	int boolval, ret;
 	struct kernel_param dummy;
 
 	dummy.arg = &boolval;
-	dummy.flags = KPARAM_ISBOOL;
 	ret = param_set_bool(val, &dummy);
 	if (ret == 0)
-		*(bool *)kp->arg = !boolval;
+		*(int *)kp->arg = !boolval;
 	return ret;
 }
 
 int param_get_invbool(char *buffer, struct kernel_param *kp)
 {
-	return sprintf(buffer, "%c", (*(bool *)kp->arg) ? 'N' : 'Y');
+	return sprintf(buffer, "%c", (*(int *)kp->arg) ? 'N' : 'Y');
 }
 
 /* We break the rule and mangle the string. */
@@ -607,7 +591,7 @@ void destroy_params(const struct kernel_param *params, unsigned num)
 	unsigned int i;
 
 	for (i = 0; i < num; i++)
-		if (params[i].flags & KPARAM_KMALLOCED)
+		if (params[i].perm & KPARAM_KMALLOCED)
 			kfree(*(char **)params[i].arg);
 }
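On the module-author side nothing changes: bool parameters are declared the same way, they are simply backed by an int again, and the kmalloc bookkeeping for charp parameters rides in the high bits of perm rather than a separate flags word. A sketch (enable_feature is a hypothetical parameter):

    #include <linux/moduleparam.h>

    /* Backed by an int: param_set_bool() stores 0 or 1 directly. */
    static int enable_feature = 1;
    module_param(enable_feature, bool, 0644);
    MODULE_PARM_DESC(enable_feature, "Enable the feature (Y/N/1/0)");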
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 8ec9d13140be..f04aa9664504 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -2192,7 +2192,6 @@ void kick_process(struct task_struct *p)
 		smp_send_reschedule(cpu);
 	preempt_enable();
 }
-EXPORT_SYMBOL_GPL(kick_process);
 
 /*
  * Return a low guess at the load of a migration-source cpu weighted
diff --git a/trunk/lib/extable.c b/trunk/lib/extable.c
index 4cac81ec225e..179c08745595 100644
--- a/trunk/lib/extable.c
+++ b/trunk/lib/extable.c
@@ -39,26 +39,7 @@ void sort_extable(struct exception_table_entry *start,
 	sort(start, finish - start, sizeof(struct exception_table_entry),
 	     cmp_ex, NULL);
 }
-
-#ifdef CONFIG_MODULES
-/*
- * If the exception table is sorted, any entries referring to the module
- * init section will be at the beginning or the end.
- */
-void trim_init_extable(struct module *m)
-{
-	/* trim the beginning */
-	while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
-		m->extable++;
-		m->num_exentries--;
-	}
-	/* trim the end */
-	while (m->num_exentries &&
-	       within_module_init(m->extable[m->num_exentries-1].insn, m))
-		m->num_exentries--;
-}
-#endif /* CONFIG_MODULES */
-#endif /* !ARCH_HAS_SORT_EXTABLE */
+#endif
 
 #ifndef ARCH_HAS_SEARCH_EXTABLE
 /*
diff --git a/trunk/mm/page_cgroup.c b/trunk/mm/page_cgroup.c
index 3dd4a909a1de..11a8a10a3909 100644
--- a/trunk/mm/page_cgroup.c
+++ b/trunk/mm/page_cgroup.c
@@ -47,8 +47,6 @@ static int __init alloc_node_page_cgroup(int nid)
 	struct page_cgroup *base, *pc;
 	unsigned long table_size;
 	unsigned long start_pfn, nr_pages, index;
-	struct page *page;
-	unsigned int order;
 
 	start_pfn = NODE_DATA(nid)->node_start_pfn;
 	nr_pages = NODE_DATA(nid)->node_spanned_pages;
@@ -57,13 +55,11 @@ static int __init alloc_node_page_cgroup(int nid)
 		return 0;
 
 	table_size = sizeof(struct page_cgroup) * nr_pages;
-	order = get_order(table_size);
-	page = alloc_pages_node(nid, GFP_NOWAIT | __GFP_ZERO, order);
-	if (!page)
-		page = alloc_pages_node(-1, GFP_NOWAIT | __GFP_ZERO, order);
-	if (!page)
+
+	base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
+			table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+	if (!base)
 		return -ENOMEM;
-	base = page_address(page);
 	for (index = 0; index < nr_pages; index++) {
 		pc = base + index;
 		__init_page_cgroup(pc, start_pfn + index);
@@ -73,7 +69,7 @@ static int __init alloc_node_page_cgroup(int nid)
 	return 0;
 }
 
-void __init page_cgroup_init(void)
+void __init page_cgroup_init_flatmem(void)
 {
 
 	int nid, fail;
@@ -117,16 +113,11 @@ static int __init_refok init_section_page_cgroup(unsigned long pfn)
 	if (!section->page_cgroup) {
 		nid = page_to_nid(pfn_to_page(pfn));
 		table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
-		if (slab_is_available()) {
-			base = kmalloc_node(table_size,
-					GFP_KERNEL | __GFP_NOWARN, nid);
-			if (!base)
-				base = vmalloc_node(table_size, nid);
-		} else {
-			base = __alloc_bootmem_node_nopanic(NODE_DATA(nid),
-				table_size,
-				PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-		}
+		VM_BUG_ON(!slab_is_available());
+		base = kmalloc_node(table_size,
+				GFP_KERNEL | __GFP_NOWARN, nid);
+		if (!base)
+			base = vmalloc_node(table_size, nid);
 	} else {
 		/*
 		 * We don't have to allocate page_cgroup again, but
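The SPARSEMEM path now assumes slab is up (hence the VM_BUG_ON) and uses the common large-table idiom: try kmalloc_node() first, falling back to vmalloc_node() when the node's memory is too fragmented for a physically contiguous table. The matching free must check which allocator succeeded; a sketch of that pairing (table_alloc and table_free are hypothetical helpers):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void *table_alloc(size_t size, int nid)
    {
            void *p = kmalloc_node(size, GFP_KERNEL | __GFP_NOWARN, nid);

            if (!p)
                    p = vmalloc_node(size, nid);
            return p;
    }

    static void table_free(void *p)
    {
            if (is_vmalloc_addr(p))         /* declared in linux/mm.h */
                    vfree(p);
            else
                    kfree(p);
    }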
diff --git a/trunk/net/9p/trans_virtio.c b/trunk/net/9p/trans_virtio.c
index a49484e67e1d..bb8579a141a8 100644
--- a/trunk/net/9p/trans_virtio.c
+++ b/trunk/net/9p/trans_virtio.c
@@ -246,7 +246,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
 	chan->vdev = vdev;
 
 	/* We expect one virtqueue, for requests. */
-	chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
+	chan->vq = vdev->config->find_vq(vdev, 0, req_done);
 	if (IS_ERR(chan->vq)) {
 		err = PTR_ERR(chan->vq);
 		goto out_free_vq;
@@ -261,7 +261,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
 	return 0;
 
 out_free_vq:
-	vdev->config->del_vqs(vdev);
+	vdev->config->del_vq(chan->vq);
 fail:
 	mutex_lock(&virtio_9p_lock);
 	chan_index--;
@@ -332,7 +332,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
 	BUG_ON(chan->inuse);
 
 	if (chan->initialized) {
-		vdev->config->del_vqs(vdev);
+		vdev->config->del_vq(chan->vq);
 		chan->initialized = false;
 	}
 }
diff --git a/trunk/scripts/mod/file2alias.c b/trunk/scripts/mod/file2alias.c
index 40e0045876ee..a3344285ccf4 100644
--- a/trunk/scripts/mod/file2alias.c
+++ b/trunk/scripts/mod/file2alias.c
@@ -641,7 +641,7 @@ static int do_virtio_entry(const char *filename, struct virtio_device_id *id,
 	id->vendor = TO_NATIVE(id->vendor);
 
 	strcpy(alias, "virtio:");
-	ADD(alias, "d", id->device != VIRTIO_DEV_ANY_ID, id->device);
+	ADD(alias, "d", 1, id->device);
 	ADD(alias, "v", id->vendor != VIRTIO_DEV_ANY_ID, id->vendor);
 
 	add_wildcard(alias);
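The file2alias change makes the device field unconditional in generated module aliases: the numeric device ID is always emitted, and only the vendor may remain a wildcard. Assuming file2alias's usual zero-padded hex field formatting for the 32-bit ID fields, a driver matching device ID 1 with any vendor would carry an alias like:

    MODULE_ALIAS("virtio:d00000001v*");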