Skip to content

Commit

Permalink
---
Browse files Browse the repository at this point in the history
yaml
---
r: 6158
b: refs/heads/master
c: 87b8167
h: refs/heads/master
v: v3
  • Loading branch information
Roland Dreier authored and Roland Dreier committed Aug 27, 2005
1 parent 407b366 commit f0e5106
Show file tree
Hide file tree
Showing 6 changed files with 142 additions and 225 deletions.
2 changes: 1 addition & 1 deletion [refs]
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
---
refs/heads/master: f520ba5aa48e2891c3fb3e364eeaaab4212c7c45
refs/heads/master: 87b816706bb2b79fbaff8e0b8e279e783273383e
116 changes: 116 additions & 0 deletions trunk/drivers/infiniband/hw/mthca/mthca_allocator.c
Original file line number Diff line number Diff line change
Expand Up @@ -177,3 +177,119 @@ void mthca_array_cleanup(struct mthca_array *array, int nent)

kfree(array->page_list);
}

/*
* Handling for queue buffers -- we allocate a bunch of memory and
* register it in a memory region at HCA virtual address 0. If the
* requested size is > max_direct, we split the allocation into
* multiple pages, so we don't require too much contiguous memory.
*/

/*
 * Allocate and register a queue buffer of @size bytes.
 *
 * If @size <= @max_direct a single contiguous buffer is used and
 * *is_direct is set to 1; otherwise the buffer is split into PAGE_SIZE
 * chunks tracked in buf->page_list and *is_direct is set to 0.  In both
 * cases the memory is zeroed and registered as a memory region in @pd
 * at HCA virtual address 0 (HCA-writable iff @hca_write), returned
 * through @mr.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from mthca_mr_alloc_phys(); all partial allocations are released on
 * failure.
 */
int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
		    union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
		    int hca_write, struct mthca_mr *mr)
{
	int err = -ENOMEM;
	int npages, shift;
	u64 *dma_list = NULL;	/* DMA address of each registered chunk */
	dma_addr_t t;
	int i;

	if (size <= max_direct) {
		/* Direct case: one physically contiguous allocation. */
		*is_direct = 1;
		npages = 1;
		shift = get_order(size) + PAGE_SHIFT;

		buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
						     size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		pci_unmap_addr_set(&buf->direct, mapping, t);

		memset(buf->direct.buf, 0, size);

		/*
		 * Halve the chunk size (doubling the chunk count) until
		 * the buffer's DMA address is aligned to the chunk size;
		 * presumably mthca_mr_alloc_phys() requires size-aligned
		 * chunks — TODO confirm against the MR code.
		 */
		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		/* Indirect case: one coherent allocation per page. */
		*is_direct = 0;
		npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		shift = PAGE_SHIFT;

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			return -ENOMEM;

		buf->page_list = kmalloc(npages * sizeof *buf->page_list,
					 GFP_KERNEL);
		if (!buf->page_list)
			goto err_out;

		/*
		 * NULL every entry first so the error path can tell
		 * which pages were actually allocated.
		 */
		for (i = 0; i < npages; ++i)
			buf->page_list[i].buf = NULL;

		for (i = 0; i < npages; ++i) {
			buf->page_list[i].buf =
				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						   &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			dma_list[i] = t;
			pci_unmap_addr_set(&buf->page_list[i], mapping, t);

			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}
	}

	/* Register all chunks as a single MR at HCA virtual address 0. */
	err = mthca_mr_alloc_phys(dev, pd->pd_num,
				  dma_list, shift, npages,
				  0, size,
				  MTHCA_MPT_FLAG_LOCAL_READ |
				  (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0),
				  mr);
	if (err)
		goto err_free;

	kfree(dma_list);

	return 0;

err_free:
	/* mr == NULL: no MR has been created at this point. */
	mthca_buf_free(dev, size, buf, *is_direct, NULL);

err_out:
	kfree(dma_list);

	return err;
}

/*
 * Free a queue buffer allocated by mthca_buf_alloc() and, if @mr is
 * non-NULL, the memory region registered for it.
 *
 * @size and @is_direct must match the values used/returned by the
 * corresponding mthca_buf_alloc() call.  Pass mr == NULL when called
 * from mthca_buf_alloc()'s own error path, where no MR exists yet.
 */
void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
		    int is_direct, struct mthca_mr *mr)
{
	int i;

	if (mr)
		mthca_free_mr(dev, mr);

	if (is_direct)
		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
				  pci_unmap_addr(&buf->direct, mapping));
	else {
		for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
			/*
			 * Skip entries that were never allocated: on
			 * mthca_buf_alloc()'s error path the tail of
			 * page_list[] is still NULL, and we must not
			 * hand NULL to dma_free_coherent().
			 */
			if (buf->page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  pci_unmap_addr(&buf->page_list[i],
								 mapping));
		kfree(buf->page_list);
	}
}
118 changes: 6 additions & 112 deletions trunk/drivers/infiniband/hw/mthca/mthca_cq.c
Original file line number Diff line number Diff line change
Expand Up @@ -639,113 +639,8 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)

static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
{
int i;
int size;

if (cq->is_direct)
dma_free_coherent(&dev->pdev->dev,
(cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
cq->queue.direct.buf,
pci_unmap_addr(&cq->queue.direct,
mapping));
else {
size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
if (cq->queue.page_list[i].buf)
dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
cq->queue.page_list[i].buf,
pci_unmap_addr(&cq->queue.page_list[i],
mapping));

kfree(cq->queue.page_list);
}
}

static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
struct mthca_cq *cq)
{
int err = -ENOMEM;
int npages, shift;
u64 *dma_list = NULL;
dma_addr_t t;
int i;

if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) {
cq->is_direct = 1;
npages = 1;
shift = get_order(size) + PAGE_SHIFT;

cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
size, &t, GFP_KERNEL);
if (!cq->queue.direct.buf)
return -ENOMEM;

pci_unmap_addr_set(&cq->queue.direct, mapping, t);

memset(cq->queue.direct.buf, 0, size);

while (t & ((1 << shift) - 1)) {
--shift;
npages *= 2;
}

dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
if (!dma_list)
goto err_free;

for (i = 0; i < npages; ++i)
dma_list[i] = t + i * (1 << shift);
} else {
cq->is_direct = 0;
npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
shift = PAGE_SHIFT;

dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
if (!dma_list)
return -ENOMEM;

cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list,
GFP_KERNEL);
if (!cq->queue.page_list)
goto err_out;

for (i = 0; i < npages; ++i)
cq->queue.page_list[i].buf = NULL;

for (i = 0; i < npages; ++i) {
cq->queue.page_list[i].buf =
dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
&t, GFP_KERNEL);
if (!cq->queue.page_list[i].buf)
goto err_free;

dma_list[i] = t;
pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t);

memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE);
}
}

err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
dma_list, shift, npages,
0, size,
MTHCA_MPT_FLAG_LOCAL_WRITE |
MTHCA_MPT_FLAG_LOCAL_READ,
&cq->mr);
if (err)
goto err_free;

kfree(dma_list);

return 0;

err_free:
mthca_free_cq_buf(dev, cq);

err_out:
kfree(dma_list);

return err;
mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
&cq->queue, cq->is_direct, &cq->mr);
}

int mthca_init_cq(struct mthca_dev *dev, int nent,
Expand Down Expand Up @@ -797,7 +692,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
cq_context = mailbox->buf;

if (cq->is_kernel) {
err = mthca_alloc_cq_buf(dev, size, cq);
err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,
&cq->queue, &cq->is_direct,
&dev->driver_pd, 1, &cq->mr);
if (err)
goto err_out_mailbox;

Expand Down Expand Up @@ -858,10 +755,8 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
return 0;

err_out_free_mr:
if (cq->is_kernel) {
mthca_free_mr(dev, &cq->mr);
if (cq->is_kernel)
mthca_free_cq_buf(dev, cq);
}

err_out_mailbox:
mthca_free_mailbox(dev, mailbox);
Expand Down Expand Up @@ -929,7 +824,6 @@ void mthca_free_cq(struct mthca_dev *dev,
wait_event(cq->wait, !atomic_read(&cq->refcount));

if (cq->is_kernel) {
mthca_free_mr(dev, &cq->mr);
mthca_free_cq_buf(dev, cq);
if (mthca_is_memfree(dev)) {
mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
Expand Down
5 changes: 5 additions & 0 deletions trunk/drivers/infiniband/hw/mthca/mthca_dev.h
Original file line number Diff line number Diff line change
Expand Up @@ -361,6 +361,11 @@ int mthca_array_set(struct mthca_array *array, int index, void *value);
void mthca_array_clear(struct mthca_array *array, int index);
int mthca_array_init(struct mthca_array *array, int nent);
void mthca_array_cleanup(struct mthca_array *array, int nent);
int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
int hca_write, struct mthca_mr *mr);
void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
int is_direct, struct mthca_mr *mr);

int mthca_init_uar_table(struct mthca_dev *dev);
int mthca_init_pd_table(struct mthca_dev *dev);
Expand Down
15 changes: 7 additions & 8 deletions trunk/drivers/infiniband/hw/mthca/mthca_provider.h
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,11 @@ struct mthca_buf_list {
DECLARE_PCI_UNMAP_ADDR(mapping)
};

/*
 * Queue buffer: either one contiguous buffer (direct) or an array of
 * per-page buffers (page_list).  Which member is valid is recorded by
 * the caller via the is_direct flag set by mthca_buf_alloc().
 */
union mthca_buf {
	struct mthca_buf_list direct;
	struct mthca_buf_list *page_list;
};

struct mthca_uar {
unsigned long pfn;
int index;
Expand Down Expand Up @@ -187,10 +192,7 @@ struct mthca_cq {
__be32 *arm_db;
int arm_sn;

union {
struct mthca_buf_list direct;
struct mthca_buf_list *page_list;
} queue;
union mthca_buf queue;
struct mthca_mr mr;
wait_queue_head_t wait;
};
Expand Down Expand Up @@ -228,10 +230,7 @@ struct mthca_qp {
int send_wqe_offset;

u64 *wrid;
union {
struct mthca_buf_list direct;
struct mthca_buf_list *page_list;
} queue;
union mthca_buf queue;

wait_queue_head_t wait;
};
Expand Down
Loading

0 comments on commit f0e5106

Please sign in to comment.