Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)
drm/amdgpu: refine the job naming for amdgpu_job and amdgpu_sched_job
Use consistent naming across functions.

Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: David Zhou <david1.zhou@amd.com>
Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
commit 4c7eb91cae
parent bf60efd353
8 changed files with 71 additions and 69 deletions
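The rule the patch applies: a variable or parameter of type struct amd_sched_job * is now always named sched_job (the scheduler's view), while a struct amdgpu_job * is always named job (the driver's view, which embeds the scheduler job as its base member). A minimal sketch of the pattern follows; the struct layouts are simplified stand-ins for the real amdgpu definitions, not verbatim kernel code:

/* Illustration only: simplified stand-ins for the real structs. */
struct amd_sched_job {                  /* scheduler-side object */
        void *owner;
};

struct amdgpu_job {                     /* driver-side object */
        struct amd_sched_job base;      /* first member, so the plain
                                         * cast below is valid */
        unsigned int num_ibs;
};

/* Scheduler callbacks receive the base object, named sched_job... */
static struct amdgpu_job *example_to_job(struct amd_sched_job *sched_job)
{
        /* ...and the downcast result is named job. */
        struct amdgpu_job *job = (struct amdgpu_job *)sched_job;
        return job;
}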
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -1275,7 +1275,7 @@ struct amdgpu_job {
 	uint32_t num_ibs;
 	struct mutex job_lock;
 	struct amdgpu_user_fence uf;
-	int (*free_job)(struct amdgpu_job *sched_job);
+	int (*free_job)(struct amdgpu_job *job);
 };

 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -778,15 +778,15 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 	return 0;
 }

-static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
+static int amdgpu_cs_free_job(struct amdgpu_job *job)
 {
 	int i;
-	if (sched_job->ibs)
-		for (i = 0; i < sched_job->num_ibs; i++)
-			amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
-	kfree(sched_job->ibs);
-	if (sched_job->uf.bo)
-		drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
+	if (job->ibs)
+		for (i = 0; i < job->num_ibs; i++)
+			amdgpu_ib_free(job->adev, &job->ibs[i]);
+	kfree(job->ibs);
+	if (job->uf.bo)
+		drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
 	return 0;
 }
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c

@@ -27,42 +27,42 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"

-static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
+static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
 {
-	struct amdgpu_job *sched_job = (struct amdgpu_job *)job;
-	return amdgpu_sync_get_fence(&sched_job->ibs->sync);
+	struct amdgpu_job *job = (struct amdgpu_job *)sched_job;
+	return amdgpu_sync_get_fence(&job->ibs->sync);
 }

-static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
+static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
 {
 	struct amdgpu_fence *fence = NULL;
-	struct amdgpu_job *sched_job;
+	struct amdgpu_job *job;
 	int r;

-	if (!job) {
+	if (!sched_job) {
 		DRM_ERROR("job is null\n");
 		return NULL;
 	}
-	sched_job = (struct amdgpu_job *)job;
-	mutex_lock(&sched_job->job_lock);
-	r = amdgpu_ib_schedule(sched_job->adev,
-			       sched_job->num_ibs,
-			       sched_job->ibs,
-			       sched_job->base.owner);
+	job = (struct amdgpu_job *)sched_job;
+	mutex_lock(&job->job_lock);
+	r = amdgpu_ib_schedule(job->adev,
+			       job->num_ibs,
+			       job->ibs,
+			       job->base.owner);
 	if (r) {
 		DRM_ERROR("Error scheduling IBs (%d)\n", r);
 		goto err;
 	}

-	fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
+	fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence);

 err:
-	if (sched_job->free_job)
-		sched_job->free_job(sched_job);
+	if (job->free_job)
+		job->free_job(job);

-	mutex_unlock(&sched_job->job_lock);
-	fence_put(&sched_job->base.s_fence->base);
-	kfree(sched_job);
+	mutex_unlock(&job->job_lock);
+	fence_put(&job->base.s_fence->base);
+	kfree(job);
 	return fence ? &fence->base : NULL;
 }
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -805,10 +805,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 }

 static int amdgpu_uvd_free_job(
-	struct amdgpu_job *sched_job)
+	struct amdgpu_job *job)
 {
-	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
-	kfree(sched_job->ibs);
+	amdgpu_ib_free(job->adev, job->ibs);
+	kfree(job->ibs);
 	return 0;
 }
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

@@ -342,10 +342,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 }

 static int amdgpu_vce_free_job(
-	struct amdgpu_job *sched_job)
+	struct amdgpu_job *job)
 {
-	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
-	kfree(sched_job->ibs);
+	amdgpu_ib_free(job->adev, job->ibs);
+	kfree(job->ibs);
 	return 0;
 }
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -316,12 +316,12 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
 	}
 }

-int amdgpu_vm_free_job(struct amdgpu_job *sched_job)
+int amdgpu_vm_free_job(struct amdgpu_job *job)
 {
 	int i;
-	for (i = 0; i < sched_job->num_ibs; i++)
-		amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
-	kfree(sched_job->ibs);
+	for (i = 0; i < job->num_ibs; i++)
+		amdgpu_ib_free(job->adev, &job->ibs[i]);
+	kfree(job->ibs);
 	return 0;
 }
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c

@@ -68,29 +68,29 @@ static struct amd_sched_job *
 amd_sched_rq_select_job(struct amd_sched_rq *rq)
 {
 	struct amd_sched_entity *entity;
-	struct amd_sched_job *job;
+	struct amd_sched_job *sched_job;

 	spin_lock(&rq->lock);

 	entity = rq->current_entity;
 	if (entity) {
 		list_for_each_entry_continue(entity, &rq->entities, list) {
-			job = amd_sched_entity_pop_job(entity);
-			if (job) {
+			sched_job = amd_sched_entity_pop_job(entity);
+			if (sched_job) {
 				rq->current_entity = entity;
 				spin_unlock(&rq->lock);
-				return job;
+				return sched_job;
 			}
 		}
 	}

 	list_for_each_entry(entity, &rq->entities, list) {

-		job = amd_sched_entity_pop_job(entity);
-		if (job) {
+		sched_job = amd_sched_entity_pop_job(entity);
+		if (sched_job) {
 			rq->current_entity = entity;
 			spin_unlock(&rq->lock);
-			return job;
+			return sched_job;
 		}

 		if (entity == rq->current_entity)
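For context, amd_sched_rq_select_job() above is a round-robin pick: it resumes the walk after rq->current_entity, wraps to the head of the list, and skips entities with nothing queued. A hedged userspace sketch of that selection logic over a fixed array (all names here invented for illustration):

#include <stddef.h>

struct entity { int has_job; };

/* Round-robin pick: resume after *current, wrap around at most once,
 * skip entities with no runnable job -- as the run-queue walk above
 * does over its linked list. */
static struct entity *pick_round_robin(struct entity *ents, size_t n,
                                       size_t *current)
{
        for (size_t i = 1; i <= n; i++) {
                size_t idx = (*current + i) % n;
                if (ents[idx].has_job) {
                        *current = idx; /* next pick resumes after this one */
                        return &ents[idx];
                }
        }
        return NULL; /* nothing runnable in any entity */
}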
@@ -208,15 +208,15 @@ static struct amd_sched_job *
 amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 {
 	struct amd_gpu_scheduler *sched = entity->scheduler;
-	struct amd_sched_job *job;
+	struct amd_sched_job *sched_job;

 	if (ACCESS_ONCE(entity->dependency))
 		return NULL;

-	if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job)))
+	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
 		return NULL;

-	while ((entity->dependency = sched->ops->dependency(job))) {
+	while ((entity->dependency = sched->ops->dependency(sched_job))) {

 		if (fence_add_callback(entity->dependency, &entity->cb,
 				       amd_sched_entity_wakeup))
@@ -225,32 +225,33 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 			return NULL;
 	}

-	return job;
+	return sched_job;
 }

 /**
  * Helper to submit a job to the job queue
  *
- * @job The pointer to job required to submit
+ * @sched_job The pointer to job required to submit
  *
  * Returns true if we could submit the job.
  */
-static bool amd_sched_entity_in(struct amd_sched_job *job)
+static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 {
-	struct amd_sched_entity *entity = job->s_entity;
+	struct amd_sched_entity *entity = sched_job->s_entity;
 	bool added, first = false;

 	spin_lock(&entity->queue_lock);
-	added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);
+	added = kfifo_in(&entity->job_queue, &sched_job,
+			 sizeof(sched_job)) == sizeof(sched_job);

-	if (added && kfifo_len(&entity->job_queue) == sizeof(job))
+	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
 		first = true;

 	spin_unlock(&entity->queue_lock);

 	/* first job wakes up scheduler */
 	if (first)
-		amd_sched_wakeup(job->sched);
+		amd_sched_wakeup(sched_job->sched);

 	return added;
 }
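One detail worth noting in the hunk above: the entity's job_queue kfifo stores the pointer value itself, so sizeof(sched_job) is the size of a pointer, not of the job struct, and the rename therefore has to touch the sizeof() expressions too. A small userspace analogue of that queue-a-pointer pattern (plain memcpy standing in for kfifo_in/kfifo_out):

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char fifo[64];         /* stand-in for the kfifo buffer */
        size_t len = 0;

        int payload = 42;
        int *sched_job = &payload;

        /* "kfifo_in": copy the pointer itself into the queue. */
        memcpy(fifo + len, &sched_job, sizeof(sched_job));
        len += sizeof(sched_job);

        /* "kfifo_out": copy the pointer back out and dereference it. */
        int *out;
        memcpy(&out, fifo, sizeof(out));
        printf("%d\n", *out);           /* prints 42 */
        return 0;
}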
@@ -258,7 +259,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *job)
 /**
  * Submit a job to the job queue
  *
- * @job The pointer to job required to submit
+ * @sched_job The pointer to job required to submit
  *
  * Returns 0 for success, negative error code otherwise.
  */
@@ -304,17 +305,17 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
 static struct amd_sched_job *
 amd_sched_select_job(struct amd_gpu_scheduler *sched)
 {
-	struct amd_sched_job *job;
+	struct amd_sched_job *sched_job;

 	if (!amd_sched_ready(sched))
 		return NULL;

 	/* Kernel run queue has higher priority than normal run queue*/
-	job = amd_sched_rq_select_job(&sched->kernel_rq);
-	if (job == NULL)
-		job = amd_sched_rq_select_job(&sched->sched_rq);
+	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
+	if (sched_job == NULL)
+		sched_job = amd_sched_rq_select_job(&sched->sched_rq);

-	return job;
+	return sched_job;
 }

 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
@@ -340,20 +341,20 @@ static int amd_sched_main(void *param)
 	while (!kthread_should_stop()) {
 		struct amd_sched_entity *entity;
 		struct amd_sched_fence *s_fence;
-		struct amd_sched_job *job;
+		struct amd_sched_job *sched_job;
 		struct fence *fence;

 		wait_event_interruptible(sched->wake_up_worker,
 			kthread_should_stop() ||
-			(job = amd_sched_select_job(sched)));
+			(sched_job = amd_sched_select_job(sched)));

-		if (!job)
+		if (!sched_job)
 			continue;

-		entity = job->s_entity;
-		s_fence = job->s_fence;
+		entity = sched_job->s_entity;
+		s_fence = sched_job->s_fence;
 		atomic_inc(&sched->hw_rq_count);
-		fence = sched->ops->run_job(job);
+		fence = sched->ops->run_job(sched_job);
 		if (fence) {
 			r = fence_add_callback(fence, &s_fence->cb,
 					       amd_sched_process_job);
|
||||||
amd_sched_process_job(NULL, &s_fence->cb);
|
amd_sched_process_job(NULL, &s_fence->cb);
|
||||||
}
|
}
|
||||||
|
|
||||||
count = kfifo_out(&entity->job_queue, &job, sizeof(job));
|
count = kfifo_out(&entity->job_queue, &sched_job,
|
||||||
WARN_ON(count != sizeof(job));
|
sizeof(sched_job));
|
||||||
|
WARN_ON(count != sizeof(sched_job));
|
||||||
wake_up(&sched->job_scheduled);
|
wake_up(&sched->job_scheduled);
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h

@@ -91,8 +91,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
  * these functions should be implemented in driver side
  */
 struct amd_sched_backend_ops {
-	struct fence *(*dependency)(struct amd_sched_job *job);
-	struct fence *(*run_job)(struct amd_sched_job *job);
+	struct fence *(*dependency)(struct amd_sched_job *sched_job);
+	struct fence *(*run_job)(struct amd_sched_job *sched_job);
 };

 /**
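Drivers implement this ops table and hand it to the scheduler. amdgpu wires up the two callbacks renamed in this patch roughly as follows; this is a sketch of the amdgpu_sched.c of this era, not a verbatim quote:

/* Driver-side backend table consumed by the amd scheduler. */
static struct amd_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_sched_dependency,
        .run_job = amdgpu_sched_run_job,
};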