Skip to content

Commit

Permalink
scsi: ibmvfc: Move event pool init/free routines
Browse files Browse the repository at this point in the history
The next patch in this series reworks the event pool allocation calls to
happen within the individual queue allocation routines instead of as
independent calls.

Move the init/free routines earlier in ibmvfc.c to prevent undefined
reference errors when calling these functions from the queue allocation
code. No functional change.

Link: https://lore.kernel.org/r/20210114203148.246656-3-tyreld@linux.ibm.com
Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Tyrel Datwyler <tyreld@linux.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
  • Loading branch information
Tyrel Datwyler authored and Martin K. Petersen committed Jan 15, 2021
1 parent 6ae208e commit 225acf5
Showing 1 changed file with 76 additions and 75 deletions.
151 changes: 76 additions & 75 deletions drivers/scsi/ibmvscsi/ibmvfc.c
Original file line number Diff line number Diff line change
Expand Up @@ -716,6 +716,82 @@ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
}

/**
 * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
 * @vhost: ibmvfc host who owns the event pool
 * @queue: ibmvfc queue struct whose event pool is being initialized
 *
 * Allocates the event array and a single DMA-coherent region holding one
 * transfer IU per event, then links every event onto the queue's free list.
 *
 * Returns zero on success.
 **/
static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
				  struct ibmvfc_queue *queue)
{
	int i;
	struct ibmvfc_event_pool *pool = &queue->evt_pool;

	ENTER;
	/* Room for all host-visible requests plus driver-internal ones. */
	pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;

	/* One contiguous coherent buffer for all IUs; iu_token is its DMA address. */
	pool->iu_storage = dma_alloc_coherent(vhost->dev,
					      pool->size * sizeof(*pool->iu_storage),
					      &pool->iu_token, 0);

	if (!pool->iu_storage) {
		kfree(pool->events);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&queue->sent);
	INIT_LIST_HEAD(&queue->free);
	spin_lock_init(&queue->l_lock);

	for (i = 0; i < pool->size; ++i) {
		struct ibmvfc_event *evt = &pool->events[i];

		atomic_set(&evt->free, 1);
		evt->crq.valid = 0x80;
		/* ioba: DMA address of this event's IU slot within iu_storage. */
		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
		evt->xfer_iu = pool->iu_storage + i;
		evt->vhost = vhost;
		evt->queue = queue;
		evt->ext_list = NULL;
		/* Every event starts out available on the free list. */
		list_add_tail(&evt->queue_list, &queue->free);
	}

	LEAVE;
	return 0;
}

/**
 * ibmvfc_free_event_pool - Frees memory of the event pool of a host
 * @vhost: ibmvfc host who owns the event pool
 * @queue: ibmvfc queue struct whose event pool is being freed
 *
 **/
static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
				   struct ibmvfc_queue *queue)
{
	int i;
	struct ibmvfc_event_pool *pool = &queue->evt_pool;

	ENTER;
	for (i = 0; i < pool->size; ++i) {
		list_del(&pool->events[i].queue_list);
		/* Every event must be idle (back on the free list) at teardown. */
		BUG_ON(atomic_read(&pool->events[i].free) != 1);
		/* ext_list entries come from vhost->sg_pool; return any stragglers. */
		if (pool->events[i].ext_list)
			dma_pool_free(vhost->sg_pool,
				      pool->events[i].ext_list,
				      pool->events[i].ext_list_token);
	}

	kfree(pool->events);
	/* Release the single coherent IU buffer allocated in ibmvfc_init_event_pool(). */
	dma_free_coherent(vhost->dev,
			  pool->size * sizeof(*pool->iu_storage),
			  pool->iu_storage, pool->iu_token);
	LEAVE;
}

/**
* ibmvfc_free_queue - Deallocate queue
* @vhost: ibmvfc host struct
Expand Down Expand Up @@ -1312,81 +1388,6 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
}

/**
 * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
 * @vhost: ibmvfc host who owns the event pool
 * @queue: ibmvfc queue struct whose event pool is being initialized
 *
 * Allocates the event array and a single DMA-coherent region holding one
 * transfer IU per event, then links every event onto the queue's free list.
 *
 * Returns zero on success.
 **/
static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
				  struct ibmvfc_queue *queue)
{
	int i;
	struct ibmvfc_event_pool *pool = &queue->evt_pool;

	ENTER;
	/* Room for all host-visible requests plus driver-internal ones. */
	pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;

	/* One contiguous coherent buffer for all IUs; iu_token is its DMA address. */
	pool->iu_storage = dma_alloc_coherent(vhost->dev,
					      pool->size * sizeof(*pool->iu_storage),
					      &pool->iu_token, 0);

	if (!pool->iu_storage) {
		kfree(pool->events);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&queue->sent);
	INIT_LIST_HEAD(&queue->free);
	spin_lock_init(&queue->l_lock);

	for (i = 0; i < pool->size; ++i) {
		struct ibmvfc_event *evt = &pool->events[i];
		atomic_set(&evt->free, 1);
		evt->crq.valid = 0x80;
		/* ioba: DMA address of this event's IU slot within iu_storage. */
		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
		evt->xfer_iu = pool->iu_storage + i;
		evt->vhost = vhost;
		evt->queue = queue;
		evt->ext_list = NULL;
		/* Every event starts out available on the free list. */
		list_add_tail(&evt->queue_list, &queue->free);
	}

	LEAVE;
	return 0;
}

/**
 * ibmvfc_free_event_pool - Frees memory of the event pool of a host
 * @vhost: ibmvfc host who owns the event pool
 * @queue: ibmvfc queue struct whose event pool is being freed
 *
 **/
static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
				   struct ibmvfc_queue *queue)
{
	int i;
	struct ibmvfc_event_pool *pool = &queue->evt_pool;

	ENTER;
	for (i = 0; i < pool->size; ++i) {
		list_del(&pool->events[i].queue_list);
		/* Every event must be idle (back on the free list) at teardown. */
		BUG_ON(atomic_read(&pool->events[i].free) != 1);
		/* ext_list entries come from vhost->sg_pool; return any stragglers. */
		if (pool->events[i].ext_list)
			dma_pool_free(vhost->sg_pool,
				      pool->events[i].ext_list,
				      pool->events[i].ext_list_token);
	}

	kfree(pool->events);
	/* Release the single coherent IU buffer allocated in ibmvfc_init_event_pool(). */
	dma_free_coherent(vhost->dev,
			  pool->size * sizeof(*pool->iu_storage),
			  pool->iu_storage, pool->iu_token);
	LEAVE;
}

/**
* ibmvfc_get_event - Gets the next free event in pool
* @vhost: ibmvfc host struct
Expand Down

0 comments on commit 225acf5

Please sign in to comment.