net-next/hinic: replace multiply and division operators
To improve performance, this patch replaces multiplication and division operations with bit operations.

Signed-off-by: Xue Chaojing <xuechaojing@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Xue Chaojing authored and David S. Miller committed Nov 20, 2018
1 parent a421ce0 commit ebda9b4
Showing 2 changed files with 38 additions and 20 deletions.
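
The sketch below is a standalone user-space illustration of the transformation, not kernel code: when wqebb_size is a power of two, the aligned-size division that hinic_wq_allocate() used to perform is equivalent to a right shift by ilog2(wqebb_size). ALIGN_POW2(), is_pow2() and ilog2_u32() are local stand-ins for the kernel's ALIGN(), is_power_of_2() and ilog2() helpers, and the numeric values are made up for the example.

/*
 * Standalone sketch: division by a power-of-two size vs. a right shift.
 */
#include <stdio.h>

#define ALIGN_POW2(x, a)  (((x) + (a) - 1) & ~((a) - 1))

static int is_pow2(unsigned int x)
{
        return x && !(x & (x - 1));
}

static unsigned int ilog2_u32(unsigned int x)
{
        unsigned int shift = 0;

        while (x > 1) {
                x >>= 1;
                shift++;
        }
        return shift;
}

int main(void)
{
        unsigned int wqebb_size = 64;     /* example value, must be a power of 2 */
        unsigned int wq_page_size = 4096; /* example value */

        if (!is_pow2(wqebb_size))
                return 1;

        unsigned int wqebb_size_shift = ilog2_u32(wqebb_size);

        /* before the patch: alignment followed by a division */
        unsigned int by_div = ALIGN_POW2(wq_page_size, wqebb_size) / wqebb_size;

        /* after the patch: the same value obtained with a right shift */
        unsigned int by_shift = ALIGN_POW2(wq_page_size, wqebb_size) >> wqebb_size_shift;

        printf("num_wqebbs_per_page: div=%u shift=%u\n", by_div, by_shift);
        return 0;
}

The patch computes the shift once in hinic_wq_allocate() and hinic_wqs_cmdq_alloc() and stores it in struct hinic_wq, so the per-WQE fast path needs only masks and shifts.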
55 changes: 36 additions & 19 deletions drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -74,12 +74,6 @@
((void *)((cmdq_pages)->shadow_page_vaddr) \
+ (wq)->block_idx * CMDQ_BLOCK_SIZE)

#define WQE_PAGE_OFF(wq, idx) (((idx) & ((wq)->num_wqebbs_per_page - 1)) * \
(wq)->wqebb_size)

#define WQE_PAGE_NUM(wq, idx) (((idx) / ((wq)->num_wqebbs_per_page)) \
& ((wq)->num_q_pages - 1))

#define WQ_PAGE_ADDR(wq, idx) \
((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])

@@ -93,6 +87,17 @@
(((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
/ (wq)->max_wqe_size)

static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx)
{
return (((idx) & ((wq)->num_wqebbs_per_page - 1))
<< (wq)->wqebb_size_shift);
}

static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx)
{
return (((idx) >> ((wq)->wqebbs_per_page_shift))
& ((wq)->num_q_pages - 1));
}
/**
* queue_alloc_page - allocate page for Queue
* @hwif: HW interface for allocating DMA
@@ -513,10 +518,11 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
struct hinic_hwif *hwif = wqs->hwif;
struct pci_dev *pdev = hwif->pdev;
u16 num_wqebbs_per_page;
u16 wqebb_size_shift;
int err;

if (wqebb_size == 0) {
dev_err(&pdev->dev, "wqebb_size must be > 0\n");
if (!is_power_of_2(wqebb_size)) {
dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
return -EINVAL;
}

@@ -530,9 +536,11 @@
return -EINVAL;
}

num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
wqebb_size_shift = ilog2(wqebb_size);
num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
>> wqebb_size_shift;

if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
if (!is_power_of_2(num_wqebbs_per_page)) {
dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
return -EINVAL;
}
@@ -550,7 +558,8 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
wq->q_depth = q_depth;
wq->max_wqe_size = max_wqe_size;
wq->num_wqebbs_per_page = num_wqebbs_per_page;

wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
wq->wqebb_size_shift = wqebb_size_shift;
wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
@@ -604,11 +613,13 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
u16 q_depth, u16 max_wqe_size)
{
struct pci_dev *pdev = hwif->pdev;
u16 num_wqebbs_per_page_shift;
u16 num_wqebbs_per_page;
u16 wqebb_size_shift;
int i, j, err = -ENOMEM;

if (wqebb_size == 0) {
dev_err(&pdev->dev, "wqebb_size must be > 0\n");
if (!is_power_of_2(wqebb_size)) {
dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
return -EINVAL;
}

@@ -622,9 +633,11 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
return -EINVAL;
}

num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
wqebb_size_shift = ilog2(wqebb_size);
num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
>> wqebb_size_shift;

if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
if (!is_power_of_2(num_wqebbs_per_page)) {
dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
return -EINVAL;
}
@@ -636,6 +649,7 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
return err;
}
num_wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);

for (i = 0; i < cmdq_blocks; i++) {
wq[i].hwif = hwif;
@@ -647,7 +661,8 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
wq[i].q_depth = q_depth;
wq[i].max_wqe_size = max_wqe_size;
wq[i].num_wqebbs_per_page = num_wqebbs_per_page;

wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift;
wq[i].wqebb_size_shift = wqebb_size_shift;
wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
@@ -741,7 +756,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,

*prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));

num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift;

if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
atomic_add(num_wqebbs, &wq->delta);
@@ -795,7 +810,8 @@ void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size)
**/
void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
{
int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
>> wq->wqebb_size_shift;

atomic_add(num_wqebbs, &wq->cons_idx);

@@ -813,7 +829,8 @@ void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
u16 *cons_idx)
{
int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
>> wq->wqebb_size_shift;
u16 curr_cons_idx, end_cons_idx;
int curr_pg, end_pg;

3 changes: 2 additions & 1 deletion drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
@@ -39,7 +39,8 @@ struct hinic_wq {
u16 q_depth;
u16 max_wqe_size;
u16 num_wqebbs_per_page;

u16 wqebbs_per_page_shift;
u16 wqebb_size_shift;
/* The addresses are 64 bit in the HW */
u64 block_paddr;
void **shadow_block_vaddr;
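
As a closing illustration, the standalone sketch below (again user-space C with made-up sizes, not kernel code) checks that the new mask-and-shift forms of WQE_PAGE_OFF() and WQE_PAGE_NUM() return the same values as the multiply/divide expressions they replace. The equivalence holds as long as wqebb_size, num_wqebbs_per_page and num_q_pages are powers of two, which is exactly what the added is_power_of_2() checks enforce.

/*
 * Standalone sketch: old multiply/divide index math vs. the new
 * mask-and-shift forms, with example power-of-two sizes.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int wqebb_size = 64;            /* power of 2 */
        unsigned int num_wqebbs_per_page = 512;  /* power of 2 */
        unsigned int num_q_pages = 8;            /* power of 2 */
        unsigned int wqebb_size_shift = 6;       /* ilog2(64) */
        unsigned int wqebbs_per_page_shift = 9;  /* ilog2(512) */

        for (unsigned int idx = 0; idx < 4096; idx++) {
                /* old macros: multiply for the offset, divide for the page */
                unsigned int off_old = (idx & (num_wqebbs_per_page - 1)) * wqebb_size;
                unsigned int num_old = (idx / num_wqebbs_per_page) & (num_q_pages - 1);

                /* new inline functions: masks and shifts only */
                unsigned int off_new = (idx & (num_wqebbs_per_page - 1)) << wqebb_size_shift;
                unsigned int num_new = (idx >> wqebbs_per_page_shift) & (num_q_pages - 1);

                assert(off_old == off_new && num_old == num_new);
        }
        printf("shift/mask forms match for all tested indexes\n");
        return 0;
}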
