From fb7adfb42e7e3824150a0ffbde4c1b7fb36fa3ff Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Tue, 26 Feb 2008 09:30:32 +0100
Subject: [PATCH]

--- yaml ---
r: 87171
b: refs/heads/master
c: 9a46d7e5b63903a70cd96c2c1391a7a26a8dbec9
h: refs/heads/master
i:
  87169: 2fef02919f68cc0593b92f85f9d15a69b18cb79a
  87167: fdc36f69c240e2db80c0f07da386a53178bd4cd5
v: v3
---
 [refs]                                       |  2 +-
 trunk/MAINTAINERS                            |  2 +-
 trunk/arch/x86/mm/ioremap.c                  |  2 -
 trunk/drivers/infiniband/core/cm.c           |  3 +-
 trunk/drivers/infiniband/core/fmr_pool.c     | 38 +++++++--------
 trunk/drivers/infiniband/core/iwcm.c         |  5 +-
 .../infiniband/hw/cxgb3/iwch_provider.c      |  5 +-
 .../drivers/infiniband/ulp/iser/iser_verbs.c | 47 +++++++++----------
 trunk/kernel/sched.c                         | 21 ++++-----
 9 files changed, 53 insertions(+), 72 deletions(-)

diff --git a/[refs] b/[refs]
index ff99bc9957a3..13d5e26abc90 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 051a82fc0c450f6ca649acf684586477aa6d5c6a
+refs/heads/master: 9a46d7e5b63903a70cd96c2c1391a7a26a8dbec9
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index 25f450fe1059..558636e3a954 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -2156,7 +2156,7 @@ L:	netdev@vger.kernel.org
 S:	Maintained
 
 IPATH DRIVER:
-P:	Ralph Campbell
+P:	Arthur Jones
 M:	infinipath@qlogic.com
 L:	general@lists.openfabrics.org
 T:	git git://git.qlogic.com/ipath-linux-2.6
diff --git a/trunk/arch/x86/mm/ioremap.c b/trunk/arch/x86/mm/ioremap.c
index ac3c959e271d..8fe576baa148 100644
--- a/trunk/arch/x86/mm/ioremap.c
+++ b/trunk/arch/x86/mm/ioremap.c
@@ -134,8 +134,6 @@ static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 			return NULL;
 	}
 
-	WARN_ON_ONCE(page_is_ram(pfn));
-
 	switch (mode) {
 	case IOR_MODE_UNCACHED:
 	default:
diff --git a/trunk/drivers/infiniband/core/cm.c b/trunk/drivers/infiniband/core/cm.c
index 4df405157086..b10ade92efed 100644
--- a/trunk/drivers/infiniband/core/cm.c
+++ b/trunk/drivers/infiniband/core/cm.c
@@ -3759,7 +3759,6 @@ static void cm_remove_one(struct ib_device *device)
 		port = cm_dev->port[i-1];
 		ib_modify_port(device, port->port_num, 0, &port_modify);
 		ib_unregister_mad_agent(port->mad_agent);
-		flush_workqueue(cm.wq);
 		cm_remove_port_fs(port);
 	}
 	kobject_put(&cm_dev->dev_obj);
@@ -3814,7 +3813,6 @@ static void __exit ib_cm_cleanup(void)
 		cancel_delayed_work(&timewait_info->work.work);
 	spin_unlock_irq(&cm.lock);
 
-	ib_unregister_client(&cm_client);
 	destroy_workqueue(cm.wq);
 
 	list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
@@ -3822,6 +3820,7 @@ static void __exit ib_cm_cleanup(void)
 		kfree(timewait_info);
 	}
 
+	ib_unregister_client(&cm_client);
 	class_unregister(&cm_class);
 	idr_destroy(&cm.local_id_table);
 }
diff --git a/trunk/drivers/infiniband/core/fmr_pool.c b/trunk/drivers/infiniband/core/fmr_pool.c
index 06d502c06a4d..7f00347364f7 100644
--- a/trunk/drivers/infiniband/core/fmr_pool.c
+++ b/trunk/drivers/infiniband/core/fmr_pool.c
@@ -139,7 +139,7 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
 static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 {
 	int ret;
-	struct ib_pool_fmr *fmr;
+	struct ib_pool_fmr *fmr, *next;
 	LIST_HEAD(unmap_list);
 	LIST_HEAD(fmr_list);
 
@@ -158,6 +158,20 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 #endif
 	}
 
+	/*
+	 * The free_list may hold FMRs that have been put there
+	 * because they haven't reached the max_remap count.
+	 * Invalidate their mapping as well.
+	 */
+	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
+		if (fmr->remap_count == 0)
+			continue;
+		hlist_del_init(&fmr->cache_node);
+		fmr->remap_count = 0;
+		list_add_tail(&fmr->fmr->list, &fmr_list);
+		list_move(&fmr->list, &unmap_list);
+	}
+
 	list_splice(&pool->dirty_list, &unmap_list);
 	INIT_LIST_HEAD(&pool->dirty_list);
 	pool->dirty_len = 0;
@@ -370,11 +384,6 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
 	i = 0;
 	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
-		if (fmr->remap_count) {
-			INIT_LIST_HEAD(&fmr_list);
-			list_add_tail(&fmr->fmr->list, &fmr_list);
-			ib_unmap_fmr(&fmr_list);
-		}
 		ib_dealloc_fmr(fmr->fmr);
 		list_del(&fmr->list);
 		kfree(fmr);
 	}
@@ -398,23 +407,8 @@ EXPORT_SYMBOL(ib_destroy_fmr_pool);
  */
 int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
 {
-	int serial;
-	struct ib_pool_fmr *fmr, *next;
-
-	/*
-	 * The free_list holds FMRs that may have been used
-	 * but have not been remapped enough times to be dirty.
-	 * Put them on the dirty list now so that the cleanup
-	 * thread will reap them too.
-	 */
-	spin_lock_irq(&pool->pool_lock);
-	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
-		if (fmr->remap_count > 0)
-			list_move(&fmr->list, &pool->dirty_list);
-	}
-	spin_unlock_irq(&pool->pool_lock);
+	int serial = atomic_inc_return(&pool->req_ser);
 
-	serial = atomic_inc_return(&pool->req_ser);
 	wake_up_process(pool->thread);
 
 	if (wait_event_interruptible(pool->force_wait,
diff --git a/trunk/drivers/infiniband/core/iwcm.c b/trunk/drivers/infiniband/core/iwcm.c
index 81c9195b512a..223b1aa7d92b 100644
--- a/trunk/drivers/infiniband/core/iwcm.c
+++ b/trunk/drivers/infiniband/core/iwcm.c
@@ -839,7 +839,6 @@ static void cm_work_handler(struct work_struct *_work)
 	unsigned long flags;
 	int empty;
 	int ret = 0;
-	int destroy_id;
 
 	spin_lock_irqsave(&cm_id_priv->lock, flags);
 	empty = list_empty(&cm_id_priv->work_list);
@@ -858,9 +857,9 @@ static void cm_work_handler(struct work_struct *_work)
 			destroy_cm_id(&cm_id_priv->id);
 		}
 		BUG_ON(atomic_read(&cm_id_priv->refcount)==0);
-		destroy_id = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
 		if (iwcm_deref_id(cm_id_priv)) {
-			if (destroy_id) {
+			if (test_bit(IWCM_F_CALLBACK_DESTROY,
+				     &cm_id_priv->flags)) {
 				BUG_ON(!list_empty(&cm_id_priv->work_list));
 				free_cm_id(cm_id_priv);
 			}
diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c
index b2ea9210467f..df1838f8f94d 100644
--- a/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -189,7 +189,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
 		return ERR_PTR(-ENOMEM);
 	}
 	chp->rhp = rhp;
-	chp->ibcq.cqe = 1 << chp->cq.size_log2;
+	chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;
 	spin_lock_init(&chp->lock);
 	atomic_set(&chp->refcnt, 1);
 	init_waitqueue_head(&chp->wait);
@@ -819,11 +819,8 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
 		kfree(qhp);
 		return ERR_PTR(-ENOMEM);
 	}
-
 	attrs->cap.max_recv_wr = rqsize - 1;
 	attrs->cap.max_send_wr = sqsize;
-	attrs->cap.max_inline_data = T3_MAX_INLINE;
-
 	qhp->rhp = rhp;
 	qhp->attr.pd = php->pdid;
 	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
diff --git a/trunk/drivers/infiniband/ulp/iser/iser_verbs.c b/trunk/drivers/infiniband/ulp/iser/iser_verbs.c
index 993f0a8ff28f..714b8db02b29 100644
--- a/trunk/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/trunk/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -237,32 +237,36 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
 static
 struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
 {
-	struct iser_device *device;
+	struct list_head *p_list;
+	struct iser_device *device = NULL;
 
 	mutex_lock(&ig.device_list_mutex);
 
-	list_for_each_entry(device, &ig.device_list, ig_list)
+	p_list = ig.device_list.next;
+	while (p_list != &ig.device_list) {
+		device = list_entry(p_list, struct iser_device, ig_list);
 		/* find if there's a match using the node GUID */
 		if (device->ib_device->node_guid == cma_id->device->node_guid)
-			goto inc_refcnt;
-
-	device = kzalloc(sizeof *device, GFP_KERNEL);
-	if (device == NULL)
-		goto out;
-
-	/* assign this device to the device */
-	device->ib_device = cma_id->device;
-	/* init the device and link it into ig device list */
-	if (iser_create_device_ib_res(device)) {
-		kfree(device);
-		device = NULL;
-		goto out;
+			break;
 	}
-	list_add(&device->ig_list, &ig.device_list);
 
-inc_refcnt:
-	device->refcount++;
+	if (device == NULL) {
+		device = kzalloc(sizeof *device, GFP_KERNEL);
+		if (device == NULL)
+			goto out;
+		/* assign this device to the device */
+		device->ib_device = cma_id->device;
+		/* init the device and link it into ig device list */
+		if (iser_create_device_ib_res(device)) {
+			kfree(device);
+			device = NULL;
+			goto out;
+		}
+		list_add(&device->ig_list, &ig.device_list);
+	}
 out:
+	BUG_ON(device == NULL);
+	device->refcount++;
 	mutex_unlock(&ig.device_list_mutex);
 	return device;
 }
@@ -368,12 +372,6 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
 	int ret;
 
 	device = iser_device_find_by_ib_device(cma_id);
-	if (!device) {
-		iser_err("device lookup/creation failed\n");
-		iser_connect_error(cma_id);
-		return;
-	}
-
 	ib_conn = (struct iser_conn *)cma_id->context;
 	ib_conn->device = device;
 
@@ -382,6 +380,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
 		iser_err("resolve route failed: %d\n", ret);
 		iser_connect_error(cma_id);
 	}
+	return;
 }
 
 static void iser_route_handler(struct rdma_cm_id *cma_id)
diff --git a/trunk/kernel/sched.c b/trunk/kernel/sched.c
index 1cb53fb1fe3d..b02e4fc25645 100644
--- a/trunk/kernel/sched.c
+++ b/trunk/kernel/sched.c
@@ -5813,6 +5813,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		/* Must be high prio: stop_machine expects to yield to it. */
 		rq = task_rq_lock(p, &flags);
 		__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
+
+		/* Update our root-domain */
+		if (rq->rd) {
+			BUG_ON(!cpu_isset(cpu, rq->rd->span));
+			cpu_set(cpu, rq->rd->online);
+		}
+
 		task_rq_unlock(rq, &flags);
 		cpu_rq(cpu)->migration_thread = p;
 		break;
@@ -5821,15 +5828,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_ONLINE_FROZEN:
 		/* Strictly unnecessary, as first user will wake it. */
 		wake_up_process(cpu_rq(cpu)->migration_thread);
-
-		/* Update our root-domain */
-		rq = cpu_rq(cpu);
-		spin_lock_irqsave(&rq->lock, flags);
-		if (rq->rd) {
-			BUG_ON(!cpu_isset(cpu, rq->rd->span));
-			cpu_set(cpu, rq->rd->online);
-		}
-		spin_unlock_irqrestore(&rq->lock, flags);
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -5881,8 +5879,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		spin_unlock_irq(&rq->lock);
 		break;
 
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
+	case CPU_DOWN_PREPARE:
 		/* Update our root-domain */
 		rq = cpu_rq(cpu);
 		spin_lock_irqsave(&rq->lock, flags);
@@ -6106,8 +6103,6 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	rq->rd = rd;
 	cpu_set(rq->cpu, rd->span);
-	if (cpu_isset(rq->cpu, cpu_online_map))
-		cpu_set(rq->cpu, rd->online);
 
 	for (class = sched_class_highest; class; class = class->next) {
 		if (class->join_domain)