Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, synced 2025-09-18 22:14:16 +00:00
scsi: ibmvfc: Move event pool init/free routines
The next patch in this series reworks the event pool allocation calls to
happen within the individual queue allocation routines instead of as
independent calls. Move the init/free routines earlier in ibmvfc.c to
prevent undefined reference errors when calling these functions from the
queue allocation code.

No functional change.

Link: https://lore.kernel.org/r/20210114203148.246656-3-tyreld@linux.ibm.com
Reviewed-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: Tyrel Datwyler <tyreld@linux.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 6ae208e5d2
commit 225acf5f1a
1 changed file with 76 additions and 75 deletions
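For context, the move is purely about C definition order: a static helper must be defined (or at least declared) before its first use in the translation unit, and hoisting ibmvfc_init_event_pool()/ibmvfc_free_event_pool() above the queue allocation code avoids adding forward declarations. A minimal userspace sketch of the same constraint (file and function names here are illustrative, not from the driver):

/* order.c - why the helpers are moved above their callers.
 * Build: gcc -Wall -Werror=implicit-function-declaration order.c
 */
#include <stdio.h>

/* Defined first, so the caller below can use it without a forward
 * declaration -- the same effect the patch achieves by moving the
 * event pool routines earlier in ibmvfc.c. */
static int init_event_pool(int size)
{
	printf("init pool of %d events\n", size);
	return 0;
}

static int alloc_queue(int size)
{
	return init_event_pool(size);	/* already defined above */
}

int main(void)
{
	return alloc_queue(16);
}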
@@ -716,6 +716,82 @@ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
 	return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
 }
 
+/**
+ * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
+ * @vhost:	ibmvfc host who owns the event pool
+ *
+ * Returns zero on success.
+ **/
+static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
+				  struct ibmvfc_queue *queue)
+{
+	int i;
+	struct ibmvfc_event_pool *pool = &queue->evt_pool;
+
+	ENTER;
+	pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
+	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
+	if (!pool->events)
+		return -ENOMEM;
+
+	pool->iu_storage = dma_alloc_coherent(vhost->dev,
+					      pool->size * sizeof(*pool->iu_storage),
+					      &pool->iu_token, 0);
+
+	if (!pool->iu_storage) {
+		kfree(pool->events);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&queue->sent);
+	INIT_LIST_HEAD(&queue->free);
+	spin_lock_init(&queue->l_lock);
+
+	for (i = 0; i < pool->size; ++i) {
+		struct ibmvfc_event *evt = &pool->events[i];
+
+		atomic_set(&evt->free, 1);
+		evt->crq.valid = 0x80;
+		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
+		evt->xfer_iu = pool->iu_storage + i;
+		evt->vhost = vhost;
+		evt->queue = queue;
+		evt->ext_list = NULL;
+		list_add_tail(&evt->queue_list, &queue->free);
+	}
+
+	LEAVE;
+	return 0;
+}
+
+/**
+ * ibmvfc_free_event_pool - Frees memory of the event pool of a host
+ * @vhost:	ibmvfc host who owns the event pool
+ *
+ **/
+static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
+				   struct ibmvfc_queue *queue)
+{
+	int i;
+	struct ibmvfc_event_pool *pool = &queue->evt_pool;
+
+	ENTER;
+	for (i = 0; i < pool->size; ++i) {
+		list_del(&pool->events[i].queue_list);
+		BUG_ON(atomic_read(&pool->events[i].free) != 1);
+		if (pool->events[i].ext_list)
+			dma_pool_free(vhost->sg_pool,
+				      pool->events[i].ext_list,
+				      pool->events[i].ext_list_token);
+	}
+
+	kfree(pool->events);
+	dma_free_coherent(vhost->dev,
+			  pool->size * sizeof(*pool->iu_storage),
+			  pool->iu_storage, pool->iu_token);
+	LEAVE;
+}
+
 /**
  * ibmvfc_free_queue - Deallocate queue
  * @vhost:	ibmvfc host struct
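In the init routine above, each host-side event is paired with one slot of the DMA-coherent IU buffer: the CPU pointer is iu_storage + i and the device-visible address is iu_token + i * sizeof(IU), stored big-endian in crq.ioba. A small userspace model of that index arithmetic (ordinary heap memory stands in for dma_alloc_coherent; struct names are illustrative, not the driver's types):

/* pool_model.c - userspace model of the event/IU pairing set up by
 * ibmvfc_init_event_pool(); a plain heap buffer stands in for the
 * DMA-coherent storage and its bus address. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct iu { char payload[64]; };	/* stand-in for the VFC IU */

struct event {
	struct iu *xfer_iu;	/* CPU pointer into the IU buffer */
	uint64_t ioba;		/* matching "bus" address for the device */
};

int main(void)
{
	size_t size = 4;	/* pool->size in the driver */
	struct iu *iu_storage = calloc(size, sizeof(*iu_storage));
	struct event *events = calloc(size, sizeof(*events));
	uint64_t iu_token = (uint64_t)(uintptr_t)iu_storage;	/* pretend DMA handle */

	if (!iu_storage || !events)
		return 1;

	for (size_t i = 0; i < size; i++) {
		events[i].xfer_iu = iu_storage + i;
		events[i].ioba = iu_token + sizeof(struct iu) * i;
		printf("event %zu: iu=%p ioba=0x%" PRIx64 "\n",
		       i, (void *)events[i].xfer_iu, events[i].ioba);
	}

	free(events);
	free(iu_storage);
	return 0;
}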
@@ -1312,81 +1388,6 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
 	strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
 }
 
-/**
- * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
- * @vhost:	ibmvfc host who owns the event pool
- *
- * Returns zero on success.
- **/
-static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
-				  struct ibmvfc_queue *queue)
-{
-	int i;
-	struct ibmvfc_event_pool *pool = &queue->evt_pool;
-
-	ENTER;
-	pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
-	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
-	if (!pool->events)
-		return -ENOMEM;
-
-	pool->iu_storage = dma_alloc_coherent(vhost->dev,
-					      pool->size * sizeof(*pool->iu_storage),
-					      &pool->iu_token, 0);
-
-	if (!pool->iu_storage) {
-		kfree(pool->events);
-		return -ENOMEM;
-	}
-
-	INIT_LIST_HEAD(&queue->sent);
-	INIT_LIST_HEAD(&queue->free);
-	spin_lock_init(&queue->l_lock);
-
-	for (i = 0; i < pool->size; ++i) {
-		struct ibmvfc_event *evt = &pool->events[i];
-		atomic_set(&evt->free, 1);
-		evt->crq.valid = 0x80;
-		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
-		evt->xfer_iu = pool->iu_storage + i;
-		evt->vhost = vhost;
-		evt->queue = queue;
-		evt->ext_list = NULL;
-		list_add_tail(&evt->queue_list, &queue->free);
-	}
-
-	LEAVE;
-	return 0;
-}
-
-/**
- * ibmvfc_free_event_pool - Frees memory of the event pool of a host
- * @vhost:	ibmvfc host who owns the event pool
- *
- **/
-static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
-				   struct ibmvfc_queue *queue)
-{
-	int i;
-	struct ibmvfc_event_pool *pool = &queue->evt_pool;
-
-	ENTER;
-	for (i = 0; i < pool->size; ++i) {
-		list_del(&pool->events[i].queue_list);
-		BUG_ON(atomic_read(&pool->events[i].free) != 1);
-		if (pool->events[i].ext_list)
-			dma_pool_free(vhost->sg_pool,
-				      pool->events[i].ext_list,
-				      pool->events[i].ext_list_token);
-	}
-
-	kfree(pool->events);
-	dma_free_coherent(vhost->dev,
-			  pool->size * sizeof(*pool->iu_storage),
-			  pool->iu_storage, pool->iu_token);
-	LEAVE;
-}
-
 /**
  * ibmvfc_get_event - Gets the next free event in pool
  * @vhost:	ibmvfc host struct
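As the diffstat (76 additions, 75 deletions) suggests, the removed and re-added function bodies are identical apart from one added blank line inside the init loop, so this commit only changes where the routines are defined; the follow-up patch in the series, not shown here, is what actually calls them from the per-queue allocation and free paths.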