drm/amdkfd: move locking outside of unmap_queues_cpsch
Signed-off-by: Yong Zhao <yong.zhao@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
commit ac30c78384 (parent 7da2bcf876)
1 changed file with 28 additions and 37 deletions
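Before the diff itself, a minimal sketch of the calling convention this commit introduces. This is not the kernel code: the pthread mutex, the simplified struct, and the stub bodies are illustrative stand-ins; only the pattern is taken from the patch. execute_queues_cpsch() and unmap_queues_cpsch() lose their bool lock parameter, so every caller is now responsible for holding dqm->lock around the call.

/*
 * Sketch only: simplified stand-ins for the kernel's device_queue_manager.
 * The real code uses struct mutex and the KFD packet manager; here a
 * pthread mutex and stub bodies illustrate the caller-holds-the-lock rule.
 */
#include <pthread.h>
#include <stdbool.h>

struct device_queue_manager {
	pthread_mutex_t lock;		/* stands in for the kernel's dqm->lock mutex */
	bool active_runlist;
};

/* dqm->lock must already be held by the caller (per the new comment in the patch) */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
			      bool static_queues_included)
{
	if (!dqm->active_runlist)
		return 0;		/* plain return replaces the old "goto out" */
	/* ... send unmap packets, wait for the fence ... */
	dqm->active_runlist = false;
	return 0;
}

/* dqm->lock must already be held by the caller here as well */
static int execute_queues_cpsch(struct device_queue_manager *dqm)
{
	int retval = unmap_queues_cpsch(dqm, false);

	if (retval)
		return retval;
	/* ... build and send the new runlist ... */
	dqm->active_runlist = true;
	return 0;
}

/* Callers such as start_cpsch()/stop_cpsch() now take and drop the lock themselves */
static int caller_like_start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	pthread_mutex_lock(&dqm->lock);
	retval = execute_queues_cpsch(dqm);
	pthread_mutex_unlock(&dqm->lock);
	return retval;
}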
@@ -44,9 +44,9 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 					struct queue *q,
 					struct qcm_process_device *qpd);
 
-static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
+static int execute_queues_cpsch(struct device_queue_manager *dqm);
 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
-				bool static_queues_included, bool lock);
+				bool static_queues_included);
 
 static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
 					struct queue *q,
@@ -379,7 +379,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 		dqm->queue_count--;
 
 	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
-		retval = execute_queues_cpsch(dqm, false);
+		retval = execute_queues_cpsch(dqm);
 
 out_unlock:
 	mutex_unlock(&dqm->lock);
@@ -695,7 +695,9 @@ static int start_cpsch(struct device_queue_manager *dqm)
 
 	init_interrupts(dqm);
 
-	execute_queues_cpsch(dqm, true);
+	mutex_lock(&dqm->lock);
+	execute_queues_cpsch(dqm);
+	mutex_unlock(&dqm->lock);
 
 	return 0;
 fail_allocate_vidmem:
@@ -707,7 +709,9 @@ fail_packet_manager_init:
 
 static int stop_cpsch(struct device_queue_manager *dqm)
 {
-	unmap_queues_cpsch(dqm, true, true);
+	mutex_lock(&dqm->lock);
+	unmap_queues_cpsch(dqm, true);
+	mutex_unlock(&dqm->lock);
 
 	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
 	pm_uninit(&dqm->packets);
@@ -738,7 +742,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
 	list_add(&kq->list, &qpd->priv_queue_list);
 	dqm->queue_count++;
 	qpd->is_debug = true;
-	execute_queues_cpsch(dqm, false);
+	execute_queues_cpsch(dqm);
 	mutex_unlock(&dqm->lock);
 
 	return 0;
@@ -750,11 +754,11 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
 	mutex_lock(&dqm->lock);
 	/* here we actually preempt the DIQ */
-	unmap_queues_cpsch(dqm, true, false);
+	unmap_queues_cpsch(dqm, true);
 	list_del(&kq->list);
 	dqm->queue_count--;
 	qpd->is_debug = false;
-	execute_queues_cpsch(dqm, false);
+	execute_queues_cpsch(dqm);
 	/*
 	 * Unconditionally decrement this counter, regardless of the queue's
 	 * type.
@@ -813,7 +817,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	list_add(&q->list, &qpd->queues_list);
 	if (q->properties.is_active) {
 		dqm->queue_count++;
-		retval = execute_queues_cpsch(dqm, false);
+		retval = execute_queues_cpsch(dqm);
 	}
 
 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
@@ -857,8 +861,9 @@ static int unmap_sdma_queues(struct device_queue_manager *dqm,
 			sdma_engine);
 }
 
+/* dqm->lock mutex has to be locked before calling this function */
 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
-				bool static_queues_included, bool lock)
+				bool static_queues_included)
 {
 	int retval;
 	enum kfd_unmap_queues_filter filter;
@@ -866,10 +871,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 
 	retval = 0;
 
-	if (lock)
-		mutex_lock(&dqm->lock);
 	if (!dqm->active_runlist)
-		goto out;
+		return retval;
 
 	pr_debug("Before destroying queues, sdma queue count is : %u\n",
 		dqm->sdma_queue_count);
@@ -886,7 +889,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
 			filter, 0, false, 0);
 	if (retval)
-		goto out;
+		return retval;
 
 	*dqm->fence_addr = KFD_FENCE_INIT;
 	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
@@ -898,50 +901,38 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
 		pdd = kfd_get_process_device_data(dqm->dev,
 				kfd_get_process(current));
 		pdd->reset_wavefronts = true;
-		goto out;
+		return retval;
 	}
 	pm_release_ib(&dqm->packets);
 	dqm->active_runlist = false;
 
-out:
-	if (lock)
-		mutex_unlock(&dqm->lock);
 	return retval;
 }
 
-static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
+/* dqm->lock mutex has to be locked before calling this function */
+static int execute_queues_cpsch(struct device_queue_manager *dqm)
 {
 	int retval;
 
-	if (lock)
-		mutex_lock(&dqm->lock);
-
-	retval = unmap_queues_cpsch(dqm, false, false);
+	retval = unmap_queues_cpsch(dqm, false);
 	if (retval) {
 		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
-		goto out;
+		return retval;
 	}
 
-	if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
-		retval = 0;
-		goto out;
-	}
+	if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
+		return 0;
 
-	if (dqm->active_runlist) {
-		retval = 0;
-		goto out;
-	}
+	if (dqm->active_runlist)
+		return 0;
 
 	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
 	if (retval) {
 		pr_err("failed to execute runlist");
-		goto out;
+		return retval;
 	}
 	dqm->active_runlist = true;
 
-out:
-	if (lock)
-		mutex_unlock(&dqm->lock);
 	return retval;
 }
 
@@ -984,7 +975,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 	if (q->properties.is_active)
 		dqm->queue_count--;
 
-	execute_queues_cpsch(dqm, false);
+	execute_queues_cpsch(dqm);
 
 	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
 