From 81814d4b98a6e68c746d6874dabd105b521b930a Mon Sep 17 00:00:00 2001
From: Roland Dreier
Date: Wed, 12 Jan 2011 09:50:36 -0800
Subject: [PATCH]

--- yaml ---
r: 231771
b: refs/heads/master
c: 4979d18fe105297f8f065743f31f8f735da8df2d
h: refs/heads/master
i:
  231769: bd06fb712e0591e1616f226ea3f730b66dd71b09
  231767: f768c0a3bf44bd9fada43a6861b025857615887d
v: v3
---
 [refs]                                        |  2 +-
 trunk/drivers/infiniband/core/cache.c         |  4 +--
 trunk/drivers/infiniband/core/device.c        | 11 ++-----
 trunk/drivers/infiniband/core/sa_query.c      |  2 +-
 trunk/drivers/infiniband/core/umem.c          |  2 +-
 .../drivers/infiniband/hw/amso1100/c2_rnic.c  |  5 +--
 trunk/drivers/infiniband/hw/ehca/ipz_pt_fn.c  |  5 +--
 .../infiniband/hw/ipath/ipath_driver.c        |  5 +--
 .../infiniband/hw/ipath/ipath_file_ops.c      | 11 +++++--
 .../infiniband/hw/ipath/ipath_init_chip.c     |  5 ++-
 .../infiniband/hw/ipath/ipath_user_pages.c    |  2 +-
 trunk/drivers/infiniband/hw/mlx4/main.c       |  3 +-
 trunk/drivers/infiniband/hw/qib/qib_iba7220.c |  7 ++--
 trunk/drivers/infiniband/hw/qib/qib_iba7322.c | 14 ++++----
 trunk/drivers/infiniband/hw/qib/qib_init.c    | 33 +++++++++++++++----
 trunk/drivers/infiniband/hw/qib/qib_qsfp.c    |  9 ++---
 trunk/drivers/infiniband/hw/qib/qib_verbs.h   |  3 +-
 trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c | 10 ++++--
 .../drivers/infiniband/ulp/ipoib/ipoib_main.c |  3 +-
 trunk/drivers/infiniband/ulp/srp/ib_srp.c     |  4 +--
 trunk/drivers/net/mlx4/catas.c                |  6 ++--
 trunk/drivers/net/mlx4/en_main.c              |  3 +-
 trunk/drivers/net/mlx4/main.c                 |  2 +-
 trunk/include/rdma/ib_verbs.h                 |  3 ---
 24 files changed, 93 insertions(+), 61 deletions(-)

diff --git a/[refs] b/[refs]
index 91d289a1916c..d86b423a0254 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: f06267104dd9112f11586830d22501d0e26245ea
+refs/heads/master: 4979d18fe105297f8f065743f31f8f735da8df2d
diff --git a/trunk/drivers/infiniband/core/cache.c b/trunk/drivers/infiniband/core/cache.c
index f9ba7d74dfc0..68883565b725 100644
--- a/trunk/drivers/infiniband/core/cache.c
+++ b/trunk/drivers/infiniband/core/cache.c
@@ -308,7 +308,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
 			INIT_WORK(&work->work, ib_cache_task);
 			work->device   = event->device;
 			work->port_num = event->element.port_num;
-			queue_work(ib_wq, &work->work);
+			schedule_work(&work->work);
 		}
 	}
 }
@@ -368,7 +368,7 @@ static void ib_cache_cleanup_one(struct ib_device *device)
 	int p;
 
 	ib_unregister_event_handler(&device->cache.event_handler);
-	flush_workqueue(ib_wq);
+	flush_scheduled_work();
 
 	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
 		kfree(device->cache.pkey_cache[p]);
diff --git a/trunk/drivers/infiniband/core/device.c b/trunk/drivers/infiniband/core/device.c
index f793bf2f5da7..a19effad0811 100644
--- a/trunk/drivers/infiniband/core/device.c
+++ b/trunk/drivers/infiniband/core/device.c
@@ -38,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
+#include <linux/workqueue.h>
 
 #include "core_priv.h"
 
@@ -51,9 +52,6 @@ struct ib_client_data {
 	void *            data;
 };
 
-struct workqueue_struct *ib_wq;
-EXPORT_SYMBOL_GPL(ib_wq);
-
 static LIST_HEAD(device_list);
 static LIST_HEAD(client_list);
 
@@ -720,10 +718,6 @@ static int __init ib_core_init(void)
 {
 	int ret;
 
-	ib_wq = alloc_workqueue("infiniband", 0, 0);
-	if (!ib_wq)
-		return -ENOMEM;
-
 	ret = ib_sysfs_setup();
 	if (ret)
 		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
@@ -732,7 +726,6 @@ static int __init ib_core_init(void)
 	if (ret) {
 		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
 		ib_sysfs_cleanup();
-		destroy_workqueue(ib_wq);
 	}
 
 	return ret;
@@ -743,7 +736,7 @@ static void __exit ib_core_cleanup(void)
 	ib_cache_cleanup();
 	ib_sysfs_cleanup();
 	/* Make sure that any pending umem accounting work is done. */
-	destroy_workqueue(ib_wq);
+	flush_scheduled_work();
 }
 
 module_init(ib_core_init);
diff --git a/trunk/drivers/infiniband/core/sa_query.c b/trunk/drivers/infiniband/core/sa_query.c
index e38be1bcc01c..91a660310b7c 100644
--- a/trunk/drivers/infiniband/core/sa_query.c
+++ b/trunk/drivers/infiniband/core/sa_query.c
@@ -425,7 +425,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
 		port->sm_ah = NULL;
 		spin_unlock_irqrestore(&port->ah_lock, flags);
 
-		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
+		schedule_work(&sa_dev->port[event->element.port_num -
 					    sa_dev->start_port].update_task);
 	}
 }
diff --git a/trunk/drivers/infiniband/core/umem.c b/trunk/drivers/infiniband/core/umem.c
index b645e558876f..415e186eee32 100644
--- a/trunk/drivers/infiniband/core/umem.c
+++ b/trunk/drivers/infiniband/core/umem.c
@@ -262,7 +262,7 @@ void ib_umem_release(struct ib_umem *umem)
 			umem->mm   = mm;
 			umem->diff = diff;
 
-			queue_work(ib_wq, &umem->work);
+			schedule_work(&umem->work);
 			return;
 		}
 	} else
diff --git a/trunk/drivers/infiniband/hw/amso1100/c2_rnic.c b/trunk/drivers/infiniband/hw/amso1100/c2_rnic.c
index 8c81992fa6db..85cfae4cad71 100644
--- a/trunk/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/trunk/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -459,12 +459,13 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 			    IB_DEVICE_MEM_WINDOW);
 
 	/* Allocate the qptr_array */
-	c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *));
+	c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
 	if (!c2dev->qptr_array) {
 		return -ENOMEM;
 	}
 
 	/* Initialize the qptr_array */
+	memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
 	c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
 	c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
 	c2dev->qptr_array[2] = (void *) &c2dev->aeq;
diff --git a/trunk/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/trunk/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index 1898d6e7cce5..1596e3085344 100644
--- a/trunk/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/trunk/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -222,14 +222,15 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
 	queue->small_page = NULL;
 
 	/* allocate queue page pointers */
-	queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
+	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
 	if (!queue->queue_pages) {
-		queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
+		queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
 		if (!queue->queue_pages) {
 			ehca_gen_err("Couldn't allocate queue page list");
 			return 0;
 		}
 	}
+	memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
 
 	/* allocate actual queue pages */
 	if (is_small) {
diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_driver.c b/trunk/drivers/infiniband/hw/ipath/ipath_driver.c
index 47db4bf34628..b33f0457a1ff 100644
--- a/trunk/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/trunk/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -199,11 +199,12 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
 		goto bail;
 	}
 
-	dd = vzalloc(sizeof(*dd));
+	dd = vmalloc(sizeof(*dd));
 	if (!dd) {
 		dd = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
+	memset(dd, 0, sizeof(*dd));
 	dd->ipath_unit = -1;
 
 	spin_lock_irqsave(&ipath_devs_lock, flags);
@@ -755,7 +756,7 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
 	 */
 	ipath_shutdown_device(dd);
 
-	flush_workqueue(ib_wq);
+	flush_scheduled_work();
 
 	if (dd->verbs_dev)
 		ipath_unregister_ib_device(dd->verbs_dev);
diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c b/trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 6d4b29c4cd89..9292a15ad7c4 100644
--- a/trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1530,7 +1530,7 @@ static int init_subports(struct ipath_devdata *dd,
 	}
 
 	num_subports = uinfo->spu_subport_cnt;
-	pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
+	pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports);
 	if (!pd->subport_uregbase) {
 		ret = -ENOMEM;
 		goto bail;
@@ -1538,13 +1538,13 @@ static int init_subports(struct ipath_devdata *dd,
 	/* Note: pd->port_rcvhdrq_size isn't initialized yet. */
 	size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
 		     sizeof(u32), PAGE_SIZE) * num_subports;
-	pd->subport_rcvhdr_base = vzalloc(size);
+	pd->subport_rcvhdr_base = vmalloc(size);
 	if (!pd->subport_rcvhdr_base) {
 		ret = -ENOMEM;
 		goto bail_ureg;
 	}
 
-	pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
+	pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
 					pd->port_rcvegrbuf_size *
 					num_subports);
 	if (!pd->subport_rcvegrbuf) {
@@ -1556,6 +1556,11 @@ static int init_subports(struct ipath_devdata *dd,
 	pd->port_subport_id = uinfo->spu_subport_id;
 	pd->active_slaves = 1;
 	set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
+	memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
+	memset(pd->subport_rcvhdr_base, 0, size);
+	memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
+				pd->port_rcvegrbuf_size *
+				num_subports);
 	goto bail;
 
 bail_rhdr:
diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_init_chip.c b/trunk/drivers/infiniband/hw/ipath/ipath_init_chip.c
index fef0f4201257..776938299e4c 100644
--- a/trunk/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/trunk/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -442,7 +442,7 @@ static void init_shadow_tids(struct ipath_devdata *dd)
 	struct page **pages;
 	dma_addr_t *addrs;
 
-	pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
+	pages = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
 			sizeof(struct page *));
 	if (!pages) {
 		ipath_dev_err(dd, "failed to allocate shadow page * "
@@ -461,6 +461,9 @@ static void init_shadow_tids(struct ipath_devdata *dd)
 		return;
 	}
 
+	memset(pages, 0, dd->ipath_cfgports * dd->ipath_rcvtidcnt *
+	       sizeof(struct page *));
+
 	dd->ipath_pageshadow = pages;
 	dd->ipath_physshadow = addrs;
 }
diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_user_pages.c b/trunk/drivers/infiniband/hw/ipath/ipath_user_pages.c
index bab9f74c0665..5e86d73eba2a 100644
--- a/trunk/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/trunk/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -220,7 +220,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
 	work->mm = mm;
 	work->num_pages = num_pages;
 
-	queue_work(ib_wq, &work->work);
+	schedule_work(&work->work);
 	return;
 
 bail_mm:
diff --git a/trunk/drivers/infiniband/hw/mlx4/main.c b/trunk/drivers/infiniband/hw/mlx4/main.c
index 4c85224aeaa7..d68d849ab866 100644
--- a/trunk/drivers/infiniband/hw/mlx4/main.c
+++ b/trunk/drivers/infiniband/hw/mlx4/main.c
@@ -1005,7 +1005,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
 		goto err_pd;
 
-	ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
+				 PAGE_SIZE);
 	if (!ibdev->uar_map)
 		goto err_uar;
 	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
diff --git a/trunk/drivers/infiniband/hw/qib/qib_iba7220.c b/trunk/drivers/infiniband/hw/qib/qib_iba7220.c
index de799f17cb9e..127a0d5069f0 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1692,7 +1692,8 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 	wake_up(&ppd->cpspec->autoneg_wait);
-	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
+	cancel_delayed_work(&ppd->cpspec->autoneg_work);
+	flush_scheduled_work();
 
 	shutdown_7220_relock_poll(ppd->dd);
 	val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
@@ -3514,8 +3515,8 @@ static void try_7220_autoneg(struct qib_pportdata *ppd)
 
 	toggle_7220_rclkrls(ppd->dd);
 	/* 2 msec is minimum length of a poll cycle */
-	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
-			   msecs_to_jiffies(2));
+	schedule_delayed_work(&ppd->cpspec->autoneg_work,
+			      msecs_to_jiffies(2));
 }
 
 /*
diff --git a/trunk/drivers/infiniband/hw/qib/qib_iba7322.c b/trunk/drivers/infiniband/hw/qib/qib_iba7322.c
index ea46fbc34b17..dbbb0e85afe4 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2406,9 +2406,10 @@ static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
 	wake_up(&ppd->cpspec->autoneg_wait);
-	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
+	cancel_delayed_work(&ppd->cpspec->autoneg_work);
 	if (ppd->dd->cspec->r1)
-		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
+		cancel_delayed_work(&ppd->cpspec->ipg_work);
+	flush_scheduled_work();
 
 	ppd->cpspec->chase_end = 0;
 	if (ppd->cpspec->chase_timer.data) /* if initted */
@@ -2705,7 +2706,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
 			if (!(pins & mask)) {
 				++handled;
 				qd->t_insert = get_jiffies_64();
-				queue_work(ib_wq, &qd->work);
+				schedule_work(&qd->work);
 			}
 		}
 	}
@@ -4989,8 +4990,8 @@ static void try_7322_autoneg(struct qib_pportdata *ppd)
 	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
 	qib_7322_mini_pcs_reset(ppd);
 	/* 2 msec is minimum length of a poll cycle */
-	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
-			   msecs_to_jiffies(2));
+	schedule_delayed_work(&ppd->cpspec->autoneg_work,
+			      msecs_to_jiffies(2));
 }
 
 /*
@@ -5120,8 +5121,7 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
 		ib_free_send_mad(send_buf);
 retry:
 	delay = 2 << ppd->cpspec->ipg_tries;
-	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
-			   msecs_to_jiffies(delay));
+	schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
 }
 
 /*
diff --git a/trunk/drivers/infiniband/hw/qib/qib_init.c b/trunk/drivers/infiniband/hw/qib/qib_init.c
index ffefb78b8949..7896afbb9ce8 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_init.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_init.c
@@ -80,6 +80,7 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
 module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
 MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
 
+struct workqueue_struct *qib_wq;
 struct workqueue_struct *qib_cq_wq;
 
 static void verify_interrupt(unsigned long);
@@ -269,20 +270,23 @@ static void init_shadow_tids(struct qib_devdata *dd)
 	struct page **pages;
 	dma_addr_t *addrs;
 
-	pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+	pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
 	if (!pages) {
 		qib_dev_err(dd, "failed to allocate shadow page * "
 			    "array, no expected sends!\n");
 		goto bail;
 	}
 
-	addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+	addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
 	if (!addrs) {
 		qib_dev_err(dd, "failed to allocate shadow dma handle "
 			    "array, no expected sends!\n");
 		goto bail_free;
 	}
 
+	memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+	memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+
 	dd->pageshadow = pages;
 	dd->physshadow = addrs;
 	return;
@@ -1043,10 +1047,24 @@ static int __init qlogic_ib_init(void)
 	if (ret)
 		goto bail;
 
+	/*
+	 * We create our own workqueue mainly because we want to be
+	 * able to flush it when devices are being removed.  We can't
+	 * use schedule_work()/flush_scheduled_work() because both
+	 * unregister_netdev() and linkwatch_event take the rtnl lock,
+	 * so flush_scheduled_work() can deadlock during device
+	 * removal.
+	 */
+	qib_wq = create_workqueue("qib");
+	if (!qib_wq) {
+		ret = -ENOMEM;
+		goto bail_dev;
+	}
+
 	qib_cq_wq = create_singlethread_workqueue("qib_cq");
 	if (!qib_cq_wq) {
 		ret = -ENOMEM;
-		goto bail_dev;
+		goto bail_wq;
 	}
 
 	/*
@@ -1076,6 +1094,8 @@ static int __init qlogic_ib_init(void)
 	idr_destroy(&qib_unit_table);
 bail_cq_wq:
 	destroy_workqueue(qib_cq_wq);
+bail_wq:
+	destroy_workqueue(qib_wq);
 bail_dev:
 	qib_dev_cleanup();
 bail:
@@ -1099,6 +1119,7 @@ static void __exit qlogic_ib_cleanup(void)
 
 	pci_unregister_driver(&qib_driver);
 
+	destroy_workqueue(qib_wq);
 	destroy_workqueue(qib_cq_wq);
 
 	qib_cpulist_count = 0;
@@ -1271,7 +1292,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
 
 	if (qib_mini_init || initfail || ret) {
 		qib_stop_timers(dd);
-		flush_workqueue(ib_wq);
+		flush_scheduled_work();
 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
 			dd->f_quiet_serdes(dd->pport + pidx);
 		if (qib_mini_init)
@@ -1320,8 +1341,8 @@ static void __devexit qib_remove_one(struct pci_dev *pdev)
 
 	qib_stop_timers(dd);
 
-	/* wait until all of our (qsfp) queue_work() calls complete */
-	flush_workqueue(ib_wq);
+	/* wait until all of our (qsfp) schedule_work() calls complete */
+	flush_scheduled_work();
 
 	ret = qibfs_remove(dd);
 	if (ret)
diff --git a/trunk/drivers/infiniband/hw/qib/qib_qsfp.c b/trunk/drivers/infiniband/hw/qib/qib_qsfp.c
index 3374a52232c1..35b3604b691d 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_qsfp.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_qsfp.c
@@ -485,7 +485,7 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
 		goto bail;
 	/* We see a module, but it may be unwise to look yet. Just schedule */
 	qd->t_insert = get_jiffies_64();
-	queue_work(ib_wq, &qd->work);
+	schedule_work(&qd->work);
 bail:
 	return;
 }
@@ -493,9 +493,10 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
 {
 	/*
-	 * There is nothing to do here for now.  our work is scheduled
-	 * with queue_work(), and flush_workqueue() from remove_one
-	 * will block until all work setup with queue_work()
+	 * There is nothing to do here for now.  Our
+	 * work is scheduled with schedule_work(), and
+	 * flush_scheduled_work() from remove_one will
+	 * block until all work set up with schedule_work()
 	 * completes.
 	 */
 }
diff --git a/trunk/drivers/infiniband/hw/qib/qib_verbs.h b/trunk/drivers/infiniband/hw/qib/qib_verbs.h
index 95e5b47223b3..63b22a9a7feb 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/trunk/drivers/infiniband/hw/qib/qib_verbs.h
@@ -805,6 +805,7 @@ static inline int qib_send_ok(struct qib_qp *qp)
 		 !(qp->s_flags & QIB_S_ANY_WAIT_SEND));
 }
 
+extern struct workqueue_struct *qib_wq;
 extern struct workqueue_struct *qib_cq_wq;
 
 /*
@@ -813,7 +814,7 @@ extern struct workqueue_struct *qib_cq_wq;
 static inline void qib_schedule_send(struct qib_qp *qp)
 {
 	if (qib_send_ok(qp))
-		queue_work(ib_wq, &qp->s_work);
+		queue_work(qib_wq, &qp->s_work);
 }
 
 static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 93d55806b967..c1c49f2d35b5 100644
--- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -352,13 +352,15 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
 	int ret;
 	int i;
 
-	rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
+	rx->rx_ring = vmalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
 	if (!rx->rx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
 		       priv->ca->name, ipoib_recvq_size);
 		return -ENOMEM;
 	}
 
+	memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);
+
 	t = kmalloc(sizeof *t, GFP_KERNEL);
 	if (!t) {
 		ret = -ENOMEM;
@@ -1095,12 +1097,13 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
 	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
 	int ret;
 
-	p->tx_ring = vzalloc(ipoib_sendq_size * sizeof *p->tx_ring);
+	p->tx_ring = vmalloc(ipoib_sendq_size * sizeof *p->tx_ring);
 	if (!p->tx_ring) {
 		ipoib_warn(priv, "failed to allocate tx ring\n");
 		ret = -ENOMEM;
 		goto err_tx;
 	}
+	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
 
 	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
 	if (IS_ERR(p->qp)) {
@@ -1518,7 +1521,7 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
 		return;
 	}
 
-	priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
+	priv->cm.srq_ring = vmalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 	if (!priv->cm.srq_ring) {
 		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
 		       priv->ca->name, ipoib_recvq_size);
@@ -1527,6 +1530,7 @@ static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
 		return;
 	}
 
+	memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
 }
 
 int ipoib_cm_dev_init(struct net_device *dev)
diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c
index aca3b44f7aed..7a07a728fe0d 100644
--- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -916,12 +916,13 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 		goto out;
 	}
 
-	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
+	priv->tx_ring = vmalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
 	if (!priv->tx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
 		       ca->name, ipoib_sendq_size);
 		goto out_rx_ring_cleanup;
 	}
+	memset(priv->tx_ring, 0, ipoib_sendq_size * sizeof *priv->tx_ring);
 
 	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */
 
diff --git a/trunk/drivers/infiniband/ulp/srp/ib_srp.c b/trunk/drivers/infiniband/ulp/srp/ib_srp.c
index 70ecb949683e..4b62105ed1e8 100644
--- a/trunk/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/trunk/drivers/infiniband/ulp/srp/ib_srp.c
@@ -638,7 +638,7 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	if (target->state == SRP_TARGET_CONNECTING) {
 		target->state = SRP_TARGET_DEAD;
 		INIT_WORK(&target->work, srp_remove_work);
-		queue_work(ib_wq, &target->work);
+		schedule_work(&target->work);
 	}
 	spin_unlock_irq(&target->lock);
 
@@ -2199,7 +2199,7 @@ static void srp_remove_one(struct ib_device *device)
 		 * started before we marked our target ports as
 		 * removed, and any target port removal tasks.
 		 */
-		flush_workqueue(ib_wq);
+		flush_scheduled_work();
 
 		list_for_each_entry_safe(target, tmp_target,
 					 &host->target_list, list) {
diff --git a/trunk/drivers/net/mlx4/catas.c b/trunk/drivers/net/mlx4/catas.c
index 68aaa42d0ced..32f947154c33 100644
--- a/trunk/drivers/net/mlx4/catas.c
+++ b/trunk/drivers/net/mlx4/catas.c
@@ -113,7 +113,7 @@ static void catas_reset(struct work_struct *work)
 void mlx4_start_catas_poll(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
-	unsigned long addr;
+	phys_addr_t addr;
 
 	INIT_LIST_HEAD(&priv->catas_err.list);
 	init_timer(&priv->catas_err.timer);
@@ -124,8 +124,8 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
 
 	priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
 	if (!priv->catas_err.map) {
-		mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n",
-			  addr);
+		mlx4_warn(dev, "Failed to map internal error buffer at 0x%llx\n",
+			  (unsigned long long) addr);
 		return;
 	}
 
diff --git a/trunk/drivers/net/mlx4/en_main.c b/trunk/drivers/net/mlx4/en_main.c
index f6e0d40cd876..1ff6ca6466ed 100644
--- a/trunk/drivers/net/mlx4/en_main.c
+++ b/trunk/drivers/net/mlx4/en_main.c
@@ -202,7 +202,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
 	if (mlx4_uar_alloc(dev, &mdev->priv_uar))
 		goto err_pd;
 
-	mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	mdev->uar_map = ioremap((phys_addr_t) mdev->priv_uar.pfn << PAGE_SHIFT,
+				PAGE_SIZE);
 	if (!mdev->uar_map)
 		goto err_uar;
 	spin_lock_init(&mdev->uar_lock);
diff --git a/trunk/drivers/net/mlx4/main.c b/trunk/drivers/net/mlx4/main.c
index 782f11d8fa71..4ffdc18fcb8a 100644
--- a/trunk/drivers/net/mlx4/main.c
+++ b/trunk/drivers/net/mlx4/main.c
@@ -829,7 +829,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 		goto err_uar_table_free;
 	}
 
-	priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
 	if (!priv->kar) {
 		mlx4_err(dev, "Couldn't map kernel access region, "
 			 "aborting.\n");
diff --git a/trunk/include/rdma/ib_verbs.h b/trunk/include/rdma/ib_verbs.h
index 55cd0a0bc977..e04c4888d1fd 100644
--- a/trunk/include/rdma/ib_verbs.h
+++ b/trunk/include/rdma/ib_verbs.h
@@ -47,13 +47,10 @@
 #include <linux/list.h>
 #include <linux/rwsem.h>
 #include <linux/scatterlist.h>
-#include <linux/workqueue.h>
 
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 
-extern struct workqueue_struct *ib_wq;
-
 union ib_gid {
 	u8	raw[16];
 	struct {
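Note: the heart of this patch is swapping the dedicated ib_wq workqueue for the shared kernel-global one (schedule_work()/flush_scheduled_work()), open-coding vzalloc() as vmalloc() + memset() for trees that predate vzalloc(), and keeping a private qib_wq only where the qib driver needs a flushable queue. The sketch below, which is not part of the patch, illustrates the two workqueue patterns against the 2.6.37-era API; the module and its identifiers (example_wq, example_task, example_work) are hypothetical names invented for illustration.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

/* A driver-private queue, analogous to the qib_wq kept by this patch. */
static struct workqueue_struct *example_wq;

static void example_task(struct work_struct *work)
{
	pr_info("example work ran\n");
}

static DECLARE_WORK(example_work, example_task);

static int __init example_init(void)
{
	/*
	 * Pattern 1: the shared global queue, as restored throughout
	 * this patch.  flush_scheduled_work() waits for *every* item
	 * on the global queue, not just ours.
	 */
	schedule_work(&example_work);
	flush_scheduled_work();

	/*
	 * Pattern 2: a private queue.  flush_workqueue() waits only
	 * for work queued here, so it cannot deadlock on locks held
	 * by unrelated work items elsewhere in the kernel.
	 */
	example_wq = create_workqueue("example");
	if (!example_wq)
		return -ENOMEM;
	queue_work(example_wq, &example_work);
	flush_workqueue(example_wq);

	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);	/* implicitly flushes the queue */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The comment restored in qib_init.c gives the rationale for pattern 2: flush_scheduled_work() blocks until every pending global work item finishes, so it deadlocks if any of them needs a lock the flusher already holds (the rtnl lock in the unregister_netdev() path), whereas flushing a private queue waits only on that driver's own work.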