Commit 81814d4

---
r: 231771
b: refs/heads/master
c: 4979d18
h: refs/heads/master
i:
  231769: bd06fb7
  231767: f768c0a
v: v3
Roland Dreier committed Jan 12, 2011
1 parent 9cddd58 commit 81814d4
Showing 24 changed files with 93 additions and 61 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: f06267104dd9112f11586830d22501d0e26245ea
+refs/heads/master: 4979d18fe105297f8f065743f31f8f735da8df2d
4 changes: 2 additions & 2 deletions trunk/drivers/infiniband/core/cache.c
@@ -308,7 +308,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
INIT_WORK(&work->work, ib_cache_task);
work->device = event->device;
work->port_num = event->element.port_num;
-queue_work(ib_wq, &work->work);
+schedule_work(&work->work);
}
}
}
@@ -368,7 +368,7 @@ static void ib_cache_cleanup_one(struct ib_device *device)
int p;

ib_unregister_event_handler(&device->cache.event_handler);
-flush_workqueue(ib_wq);
+flush_scheduled_work();

for (p = 0; p <= end_port(device) - start_port(device); ++p) {
kfree(device->cache.pkey_cache[p]);
11 changes: 2 additions & 9 deletions trunk/drivers/infiniband/core/device.c
@@ -38,6 +38,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
+#include <linux/workqueue.h>

#include "core_priv.h"

@@ -51,9 +52,6 @@ struct ib_client_data {
void * data;
};

-struct workqueue_struct *ib_wq;
-EXPORT_SYMBOL_GPL(ib_wq);
-
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

@@ -720,10 +718,6 @@ static int __init ib_core_init(void)
{
int ret;

-ib_wq = alloc_workqueue("infiniband", 0, 0);
-if (!ib_wq)
-return -ENOMEM;
-
ret = ib_sysfs_setup();
if (ret)
printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
@@ -732,7 +726,6 @@
if (ret) {
printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
ib_sysfs_cleanup();
-destroy_workqueue(ib_wq);
}

return ret;
@@ -743,7 +736,7 @@ static void __exit ib_core_cleanup(void)
ib_cache_cleanup();
ib_sysfs_cleanup();
/* Make sure that any pending umem accounting work is done. */
-destroy_workqueue(ib_wq);
+flush_scheduled_work();
}

module_init(ib_core_init);
2 changes: 1 addition & 1 deletion trunk/drivers/infiniband/core/sa_query.c
@@ -425,7 +425,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
port->sm_ah = NULL;
spin_unlock_irqrestore(&port->ah_lock, flags);

-queue_work(ib_wq, &sa_dev->port[event->element.port_num -
+schedule_work(&sa_dev->port[event->element.port_num -
sa_dev->start_port].update_task);
}
}
2 changes: 1 addition & 1 deletion trunk/drivers/infiniband/core/umem.c
@@ -262,7 +262,7 @@ void ib_umem_release(struct ib_umem *umem)
umem->mm = mm;
umem->diff = diff;

-queue_work(ib_wq, &umem->work);
+schedule_work(&umem->work);
return;
}
} else
5 changes: 3 additions & 2 deletions trunk/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -459,12 +459,13 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
IB_DEVICE_MEM_WINDOW);

/* Allocate the qptr_array */
-c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *));
+c2dev->qptr_array = vmalloc(C2_MAX_CQS * sizeof(void *));
if (!c2dev->qptr_array) {
return -ENOMEM;
}

-/* Initialize the qptr_array */
+/* Inialize the qptr_array */
+memset(c2dev->qptr_array, 0, C2_MAX_CQS * sizeof(void *));
c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
c2dev->qptr_array[2] = (void *) &c2dev->aeq;
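
Several hunks in this commit (amso1100 above, and the ehca, ipath, and qib files below) trade vzalloc() for the older vmalloc() + memset() pair. Both forms leave the buffer fully zeroed, which is why each of these conversions is purely mechanical. The helper below is an illustrative sketch with a hypothetical name, not code from the commit:

#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

/* Hypothetical helper: allocate a zeroed table of nr_items pointers. */
static void *alloc_zeroed_ptr_table(size_t nr_items)
{
	void *buf = vmalloc(nr_items * sizeof(void *));	/* older two-step form */

	if (buf)
		memset(buf, 0, nr_items * sizeof(void *));

	/* On kernels that provide it, vzalloc(nr_items * sizeof(void *)) is equivalent. */
	return buf;
}
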
5 changes: 3 additions & 2 deletions trunk/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -222,14 +222,15 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
queue->small_page = NULL;

/* allocate queue page pointers */
-queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
+queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
if (!queue->queue_pages) {
-queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
+queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
if (!queue->queue_pages) {
ehca_gen_err("Couldn't allocate queue page list");
return 0;
}
}
+memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));

/* allocate actual queue pages */
if (is_small) {
5 changes: 3 additions & 2 deletions trunk/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -199,11 +199,12 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
goto bail;
}

-dd = vzalloc(sizeof(*dd));
+dd = vmalloc(sizeof(*dd));
if (!dd) {
dd = ERR_PTR(-ENOMEM);
goto bail;
}
+memset(dd, 0, sizeof(*dd));
dd->ipath_unit = -1;

spin_lock_irqsave(&ipath_devs_lock, flags);
@@ -755,7 +756,7 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
*/
ipath_shutdown_device(dd);

-flush_workqueue(ib_wq);
+flush_scheduled_work();

if (dd->verbs_dev)
ipath_unregister_ib_device(dd->verbs_dev);
11 changes: 8 additions & 3 deletions trunk/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1530,21 +1530,21 @@ static int init_subports(struct ipath_devdata *dd,
}

num_subports = uinfo->spu_subport_cnt;
-pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
+pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports);
if (!pd->subport_uregbase) {
ret = -ENOMEM;
goto bail;
}
/* Note: pd->port_rcvhdrq_size isn't initialized yet. */
size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
sizeof(u32), PAGE_SIZE) * num_subports;
-pd->subport_rcvhdr_base = vzalloc(size);
+pd->subport_rcvhdr_base = vmalloc(size);
if (!pd->subport_rcvhdr_base) {
ret = -ENOMEM;
goto bail_ureg;
}

-pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
+pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
pd->port_rcvegrbuf_size *
num_subports);
if (!pd->subport_rcvegrbuf) {
@@ -1556,6 +1556,11 @@ static int init_subports(struct ipath_devdata *dd,
pd->port_subport_id = uinfo->spu_subport_id;
pd->active_slaves = 1;
set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
+memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
+memset(pd->subport_rcvhdr_base, 0, size);
+memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
+pd->port_rcvegrbuf_size *
+num_subports);
goto bail;

bail_rhdr:
5 changes: 4 additions & 1 deletion trunk/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -442,7 +442,7 @@ static void init_shadow_tids(struct ipath_devdata *dd)
struct page **pages;
dma_addr_t *addrs;

-pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
+pages = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
sizeof(struct page *));
if (!pages) {
ipath_dev_err(dd, "failed to allocate shadow page * "
@@ -461,6 +461,9 @@
return;
}

+memset(pages, 0, dd->ipath_cfgports * dd->ipath_rcvtidcnt *
+sizeof(struct page *));
+
dd->ipath_pageshadow = pages;
dd->ipath_physshadow = addrs;
}
2 changes: 1 addition & 1 deletion trunk/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -220,7 +220,7 @@ void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
work->mm = mm;
work->num_pages = num_pages;

-queue_work(ib_wq, &work->work);
+schedule_work(&work->work);
return;

bail_mm:
3 changes: 2 additions & 1 deletion trunk/drivers/infiniband/hw/mlx4/main.c
@@ -1005,7 +1005,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
goto err_pd;

-ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
+PAGE_SIZE);
if (!ibdev->uar_map)
goto err_uar;
MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
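
The mlx4 hunk above widens the page-frame number to phys_addr_t before shifting it by PAGE_SHIFT. A minimal user-space analogue (hypothetical values, not driver code) shows what goes wrong without the cast on a 32-bit build, where unsigned long is 32 bits but a PAE machine can hold physical addresses above 4 GiB:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pfn = 0x140000;	/* frame above the 4 GiB boundary: 0x140000 * 4 KiB = 0x140000000 */
	unsigned int page_shift = 12;	/* 4 KiB pages */

	uint32_t narrow = pfn << page_shift;		/* shift overflows 32 bits */
	uint64_t wide = (uint64_t)pfn << page_shift;	/* widen first, as the patched line does */

	printf("without cast: 0x%x\n", narrow);				/* 0x40000000 -- truncated */
	printf("with cast:    0x%llx\n", (unsigned long long)wide);	/* 0x140000000 */
	return 0;
}
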
7 changes: 4 additions & 3 deletions trunk/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1692,7 +1692,8 @@ static void qib_7220_quiet_serdes(struct qib_pportdata *ppd)
ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
wake_up(&ppd->cpspec->autoneg_wait);
-cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
+cancel_delayed_work(&ppd->cpspec->autoneg_work);
+flush_scheduled_work();

shutdown_7220_relock_poll(ppd->dd);
val = qib_read_kreg64(ppd->dd, kr_xgxs_cfg);
@@ -3514,8 +3515,8 @@ static void try_7220_autoneg(struct qib_pportdata *ppd)

toggle_7220_rclkrls(ppd->dd);
/* 2 msec is minimum length of a poll cycle */
-queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
-msecs_to_jiffies(2));
+schedule_delayed_work(&ppd->cpspec->autoneg_work,
+msecs_to_jiffies(2));
}

/*
14 changes: 7 additions & 7 deletions trunk/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2406,9 +2406,10 @@ static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
spin_unlock_irqrestore(&ppd->lflags_lock, flags);
wake_up(&ppd->cpspec->autoneg_wait);
-cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
+cancel_delayed_work(&ppd->cpspec->autoneg_work);
if (ppd->dd->cspec->r1)
-cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
+cancel_delayed_work(&ppd->cpspec->ipg_work);
+flush_scheduled_work();

ppd->cpspec->chase_end = 0;
if (ppd->cpspec->chase_timer.data) /* if initted */
@@ -2705,7 +2706,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
if (!(pins & mask)) {
++handled;
qd->t_insert = get_jiffies_64();
-queue_work(ib_wq, &qd->work);
+schedule_work(&qd->work);
}
}
}
@@ -4989,8 +4990,8 @@ static void try_7322_autoneg(struct qib_pportdata *ppd)
set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
qib_7322_mini_pcs_reset(ppd);
/* 2 msec is minimum length of a poll cycle */
-queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
-msecs_to_jiffies(2));
+schedule_delayed_work(&ppd->cpspec->autoneg_work,
+msecs_to_jiffies(2));
}

/*
@@ -5120,8 +5121,7 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
ib_free_send_mad(send_buf);
retry:
delay = 2 << ppd->cpspec->ipg_tries;
-queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
-msecs_to_jiffies(delay));
+schedule_delayed_work(&ppd->cpspec->ipg_work, msecs_to_jiffies(delay));
}

/*
33 changes: 27 additions & 6 deletions trunk/drivers/infiniband/hw/qib/qib_init.c
@@ -80,6 +80,7 @@ unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");

+struct workqueue_struct *qib_wq;
struct workqueue_struct *qib_cq_wq;

static void verify_interrupt(unsigned long);
@@ -269,20 +270,23 @@ static void init_shadow_tids(struct qib_devdata *dd)
struct page **pages;
dma_addr_t *addrs;

-pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+pages = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
if (!pages) {
qib_dev_err(dd, "failed to allocate shadow page * "
"array, no expected sends!\n");
goto bail;
}

-addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+addrs = vmalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
if (!addrs) {
qib_dev_err(dd, "failed to allocate shadow dma handle "
"array, no expected sends!\n");
goto bail_free;
}

+memset(pages, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
+memset(addrs, 0, dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
+
dd->pageshadow = pages;
dd->physshadow = addrs;
return;
@@ -1043,10 +1047,24 @@ static int __init qlogic_ib_init(void)
if (ret)
goto bail;

+/*
+* We create our own workqueue mainly because we want to be
+* able to flush it when devices are being removed. We can't
+* use schedule_work()/flush_scheduled_work() because both
+* unregister_netdev() and linkwatch_event take the rtnl lock,
+* so flush_scheduled_work() can deadlock during device
+* removal.
+*/
+qib_wq = create_workqueue("qib");
+if (!qib_wq) {
+ret = -ENOMEM;
+goto bail_dev;
+}
+
qib_cq_wq = create_singlethread_workqueue("qib_cq");
if (!qib_cq_wq) {
ret = -ENOMEM;
-goto bail_dev;
+goto bail_wq;
}

/*
@@ -1076,6 +1094,8 @@
idr_destroy(&qib_unit_table);
bail_cq_wq:
destroy_workqueue(qib_cq_wq);
+bail_wq:
+destroy_workqueue(qib_wq);
bail_dev:
qib_dev_cleanup();
bail:
@@ -1099,6 +1119,7 @@ static void __exit qlogic_ib_cleanup(void)

pci_unregister_driver(&qib_driver);

+destroy_workqueue(qib_wq);
destroy_workqueue(qib_cq_wq);

qib_cpulist_count = 0;
@@ -1271,7 +1292,7 @@ static int __devinit qib_init_one(struct pci_dev *pdev,

if (qib_mini_init || initfail || ret) {
qib_stop_timers(dd);
-flush_workqueue(ib_wq);
+flush_scheduled_work();
for (pidx = 0; pidx < dd->num_pports; ++pidx)
dd->f_quiet_serdes(dd->pport + pidx);
if (qib_mini_init)
@@ -1320,8 +1341,8 @@ static void __devexit qib_remove_one(struct pci_dev *pdev)

qib_stop_timers(dd);

-/* wait until all of our (qsfp) queue_work() calls complete */
-flush_workqueue(ib_wq);
+/* wait until all of our (qsfp) schedule_work() calls complete */
+flush_scheduled_work();

ret = qibfs_remove(dd);
if (ret)
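
The comment added in the qib_init.c hunk above explains the trade-off running through this commit: flushing the shared kernel workqueue with flush_scheduled_work() can deadlock against the rtnl lock during device removal, while a driver-owned workqueue can be flushed and destroyed safely at teardown. The module below is an illustrative sketch of that private-workqueue pattern, not code from this commit; the demo_* names are hypothetical and it uses the same 2011-era create_workqueue()/flush_workqueue() interfaces the qib hunks rely on:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;	/* hypothetical private queue */
static struct work_struct demo_work;

static void demo_worker(struct work_struct *work)
{
	pr_info("demo: deferred work ran\n");
}

static int __init demo_init(void)
{
	demo_wq = create_workqueue("demo");	/* like create_workqueue("qib") above */
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_worker);
	queue_work(demo_wq, &demo_work);	/* instead of schedule_work(&demo_work) */
	return 0;
}

static void __exit demo_exit(void)
{
	flush_workqueue(demo_wq);	/* waits only for this driver's own work items */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
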