Skip to content

Commit

Permalink
IB/ipath: Don't corrupt pending mmap list when unmapped objects are freed
Browse files Browse the repository at this point in the history

Fix the pending mmap code so it doesn't corrupt the list of pending
mmaps and crash the machine when pending mmaps are destroyed without
first being mapped.  Also, remove an unused variable, and use standard
kernel lists instead of our own homebrewed linked list implementation
to keep the pending mmap list.

Signed-off-by: Robert Walsh <robert.walsh@qlogic.com>
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
  • Loading branch information
Robert Walsh authored and Roland Dreier committed May 1, 2007
1 parent 9ba6d55 commit 6b66b2d
Show file tree
Hide file tree
Showing 6 changed files with 153 additions and 90 deletions.
51 changes: 22 additions & 29 deletions drivers/infiniband/hw/ipath/ipath_cq.c
Original file line number Diff line number Diff line change
Expand Up @@ -243,46 +243,40 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
* See ipath_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
struct ipath_mmap_info *ip;
__u64 offset = (__u64) wc;
int err;
u32 s = sizeof *wc + sizeof(struct ib_wc) * entries;

err = ib_copy_to_udata(udata, &offset, sizeof(offset));
if (err) {
ret = ERR_PTR(err);
cq->ip = ipath_create_mmap_info(dev, s, context, wc);
if (!cq->ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_wc;
}

/* Allocate info for ipath_mmap(). */
ip = kmalloc(sizeof(*ip), GFP_KERNEL);
if (!ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_wc;
err = ib_copy_to_udata(udata, &cq->ip->offset,
sizeof(cq->ip->offset));
if (err) {
ret = ERR_PTR(err);
goto bail_ip;
}
cq->ip = ip;
ip->context = context;
ip->obj = wc;
kref_init(&ip->ref);
ip->mmap_cnt = 0;
ip->size = PAGE_ALIGN(sizeof(*wc) +
sizeof(struct ib_wc) * entries);
spin_lock_irq(&dev->pending_lock);
ip->next = dev->pending_mmaps;
dev->pending_mmaps = ip;
spin_unlock_irq(&dev->pending_lock);
} else
cq->ip = NULL;

spin_lock(&dev->n_cqs_lock);
if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
spin_unlock(&dev->n_cqs_lock);
ret = ERR_PTR(-ENOMEM);
goto bail_wc;
goto bail_ip;
}

dev->n_cqs_allocated++;
spin_unlock(&dev->n_cqs_lock);

if (cq->ip) {
spin_lock_irq(&dev->pending_lock);
list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
spin_unlock_irq(&dev->pending_lock);
}

/*
* ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
* The number of entries should be >= the number requested or return
Expand All @@ -301,12 +295,12 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,

goto done;

bail_ip:
kfree(cq->ip);
bail_wc:
vfree(wc);

bail_cq:
kfree(cq);

done:
return ret;
}
Expand Down Expand Up @@ -443,13 +437,12 @@ int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
if (cq->ip) {
struct ipath_ibdev *dev = to_idev(ibcq->device);
struct ipath_mmap_info *ip = cq->ip;
u32 s = sizeof *wc + sizeof(struct ib_wc) * cqe;

ip->obj = wc;
ip->size = PAGE_ALIGN(sizeof(*wc) +
sizeof(struct ib_wc) * cqe);
ipath_update_mmap_info(dev, ip, s, wc);
spin_lock_irq(&dev->pending_lock);
ip->next = dev->pending_mmaps;
dev->pending_mmaps = ip;
if (list_empty(&ip->pending_mmaps))
list_add(&ip->pending_mmaps, &dev->pending_mmaps);
spin_unlock_irq(&dev->pending_lock);
}

Expand Down
64 changes: 58 additions & 6 deletions drivers/infiniband/hw/ipath/ipath_mmap.c
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,11 @@ void ipath_release_mmap_info(struct kref *ref)
{
struct ipath_mmap_info *ip =
container_of(ref, struct ipath_mmap_info, ref);
struct ipath_ibdev *dev = to_idev(ip->context->device);

spin_lock_irq(&dev->pending_lock);
list_del(&ip->pending_mmaps);
spin_unlock_irq(&dev->pending_lock);

vfree(ip->obj);
kfree(ip);
Expand All @@ -60,14 +65,12 @@ static void ipath_vma_open(struct vm_area_struct *vma)
struct ipath_mmap_info *ip = vma->vm_private_data;

kref_get(&ip->ref);
ip->mmap_cnt++;
}

static void ipath_vma_close(struct vm_area_struct *vma)
{
struct ipath_mmap_info *ip = vma->vm_private_data;

ip->mmap_cnt--;
kref_put(&ip->ref, ipath_release_mmap_info);
}

Expand All @@ -87,7 +90,7 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
struct ipath_ibdev *dev = to_idev(context->device);
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
unsigned long size = vma->vm_end - vma->vm_start;
struct ipath_mmap_info *ip, **pp;
struct ipath_mmap_info *ip, *pp;
int ret = -EINVAL;

/*
Expand All @@ -96,15 +99,16 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
* CQ, QP, or SRQ is soon followed by a call to mmap().
*/
spin_lock_irq(&dev->pending_lock);
for (pp = &dev->pending_mmaps; (ip = *pp); pp = &ip->next) {
list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
pending_mmaps) {
/* Only the creator is allowed to mmap the object */
if (context != ip->context || (void *) offset != ip->obj)
if (context != ip->context || (__u64) offset != ip->offset)
continue;
/* Don't allow a mmap larger than the object. */
if (size > ip->size)
break;

*pp = ip->next;
list_del_init(&ip->pending_mmaps);
spin_unlock_irq(&dev->pending_lock);

ret = remap_vmalloc_range(vma, ip->obj, 0);
Expand All @@ -119,3 +123,51 @@ int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
done:
return ret;
}

/*
* Allocate information for ipath_mmap
*/
/*
 * Allocate and initialize bookkeeping for a user mmap of a driver object.
 *
 * Reserves a unique, page-aligned offset in the device-wide mmap offset
 * space (under mmap_offset_lock) which user space will pass back to
 * ipath_mmap() to identify @obj.  Returns NULL on allocation failure.
 */
struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
					       u32 size,
					       struct ib_ucontext *context,
					       void *obj)
{
	struct ipath_mmap_info *info = kmalloc(sizeof(*info), GFP_KERNEL);

	if (!info)
		return NULL;

	size = PAGE_ALIGN(size);

	/* Hand out a unique offset; 0 is skipped (restart at PAGE_SIZE). */
	spin_lock_irq(&dev->mmap_offset_lock);
	if (!dev->mmap_offset)
		dev->mmap_offset = PAGE_SIZE;
	info->offset = dev->mmap_offset;
	dev->mmap_offset += size;
	spin_unlock_irq(&dev->mmap_offset_lock);

	INIT_LIST_HEAD(&info->pending_mmaps);
	kref_init(&info->ref);
	info->size = size;
	info->context = context;
	info->obj = obj;

	return info;
}

/*
 * Refresh mmap bookkeeping after the backing object has been replaced
 * (callers pass the reallocated buffer on resize paths): record the new
 * object and page-aligned size, and reserve a fresh offset in the
 * device's mmap offset space under mmap_offset_lock.
 */
void ipath_update_mmap_info(struct ipath_ibdev *dev,
			    struct ipath_mmap_info *ip,
			    u32 size, void *obj)
{
	u32 aligned = PAGE_ALIGN(size);

	spin_lock_irq(&dev->mmap_offset_lock);
	if (!dev->mmap_offset)
		dev->mmap_offset = PAGE_SIZE;
	ip->offset = dev->mmap_offset;
	dev->mmap_offset += aligned;
	spin_unlock_irq(&dev->mmap_offset_lock);

	ip->size = aligned;
	ip->obj = obj;
}
52 changes: 30 additions & 22 deletions drivers/infiniband/hw/ipath/ipath_qp.c
Original file line number Diff line number Diff line change
Expand Up @@ -844,34 +844,36 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
* See ipath_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
struct ipath_mmap_info *ip;
__u64 offset = (__u64) qp->r_rq.wq;
int err;

err = ib_copy_to_udata(udata, &offset, sizeof(offset));
if (err) {
ret = ERR_PTR(err);
goto bail_rwq;
}
if (!qp->r_rq.wq) {
__u64 offset = 0;

if (qp->r_rq.wq) {
/* Allocate info for ipath_mmap(). */
ip = kmalloc(sizeof(*ip), GFP_KERNEL);
if (!ip) {
err = ib_copy_to_udata(udata, &offset,
sizeof(offset));
if (err) {
ret = ERR_PTR(err);
goto bail_rwq;
}
} else {
u32 s = sizeof(struct ipath_rwq) +
qp->r_rq.size * sz;

qp->ip =
ipath_create_mmap_info(dev, s,
ibpd->uobject->context,
qp->r_rq.wq);
if (!qp->ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_rwq;
}
qp->ip = ip;
ip->context = ibpd->uobject->context;
ip->obj = qp->r_rq.wq;
kref_init(&ip->ref);
ip->mmap_cnt = 0;
ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
qp->r_rq.size * sz);
spin_lock_irq(&dev->pending_lock);
ip->next = dev->pending_mmaps;
dev->pending_mmaps = ip;
spin_unlock_irq(&dev->pending_lock);

err = ib_copy_to_udata(udata, &(qp->ip->offset),
sizeof(qp->ip->offset));
if (err) {
ret = ERR_PTR(err);
goto bail_ip;
}
}
}

Expand All @@ -885,6 +887,12 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
dev->n_qps_allocated++;
spin_unlock(&dev->n_qps_lock);

if (qp->ip) {
spin_lock_irq(&dev->pending_lock);
list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
spin_unlock_irq(&dev->pending_lock);
}

ret = &qp->ibqp;
goto bail;

Expand Down
55 changes: 26 additions & 29 deletions drivers/infiniband/hw/ipath/ipath_srq.c
Original file line number Diff line number Diff line change
Expand Up @@ -139,33 +139,24 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
* See ipath_mmap() for details.
*/
if (udata && udata->outlen >= sizeof(__u64)) {
struct ipath_mmap_info *ip;
__u64 offset = (__u64) srq->rq.wq;
int err;
u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;

err = ib_copy_to_udata(udata, &offset, sizeof(offset));
if (err) {
ret = ERR_PTR(err);
srq->ip =
ipath_create_mmap_info(dev, s,
ibpd->uobject->context,
srq->rq.wq);
if (!srq->ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_wq;
}

/* Allocate info for ipath_mmap(). */
ip = kmalloc(sizeof(*ip), GFP_KERNEL);
if (!ip) {
ret = ERR_PTR(-ENOMEM);
goto bail_wq;
err = ib_copy_to_udata(udata, &srq->ip->offset,
sizeof(srq->ip->offset));
if (err) {
ret = ERR_PTR(err);
goto bail_ip;
}
srq->ip = ip;
ip->context = ibpd->uobject->context;
ip->obj = srq->rq.wq;
kref_init(&ip->ref);
ip->mmap_cnt = 0;
ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
srq->rq.size * sz);
spin_lock_irq(&dev->pending_lock);
ip->next = dev->pending_mmaps;
dev->pending_mmaps = ip;
spin_unlock_irq(&dev->pending_lock);
} else
srq->ip = NULL;

Expand All @@ -181,21 +172,27 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
spin_unlock(&dev->n_srqs_lock);
ret = ERR_PTR(-ENOMEM);
goto bail_wq;
goto bail_ip;
}

dev->n_srqs_allocated++;
spin_unlock(&dev->n_srqs_lock);

if (srq->ip) {
spin_lock_irq(&dev->pending_lock);
list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
spin_unlock_irq(&dev->pending_lock);
}

ret = &srq->ibsrq;
goto done;

bail_ip:
kfree(srq->ip);
bail_wq:
vfree(srq->rq.wq);

bail_srq:
kfree(srq);

done:
return ret;
}
Expand Down Expand Up @@ -312,13 +309,13 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
if (srq->ip) {
struct ipath_mmap_info *ip = srq->ip;
struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
u32 s = sizeof(struct ipath_rwq) + size * sz;

ip->obj = wq;
ip->size = PAGE_ALIGN(sizeof(struct ipath_rwq) +
size * sz);
ipath_update_mmap_info(dev, ip, s, wq);
spin_lock_irq(&dev->pending_lock);
ip->next = dev->pending_mmaps;
dev->pending_mmaps = ip;
if (list_empty(&ip->pending_mmaps))
list_add(&ip->pending_mmaps,
&dev->pending_mmaps);
spin_unlock_irq(&dev->pending_lock);
}
} else if (attr_mask & IB_SRQ_LIMIT) {
Expand Down
3 changes: 3 additions & 0 deletions drivers/infiniband/hw/ipath/ipath_verbs.c
Original file line number Diff line number Diff line change
Expand Up @@ -1476,7 +1476,10 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
ret = -ENOMEM;
goto err_lk;
}
INIT_LIST_HEAD(&idev->pending_mmaps);
spin_lock_init(&idev->pending_lock);
idev->mmap_offset = PAGE_SIZE;
spin_lock_init(&idev->mmap_offset_lock);
INIT_LIST_HEAD(&idev->pending[0]);
INIT_LIST_HEAD(&idev->pending[1]);
INIT_LIST_HEAD(&idev->pending[2]);
Expand Down
Loading

0 comments on commit 6b66b2d

Please sign in to comment.