mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-10-31 16:54:21 +00:00)

	drm/scheduler: provide scheduler score externally
Allow multiple schedulers to share the load balancing score. This is
useful when one engine has different hw rings.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-and-Tested-by: Leo Liu <leo.liu@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210204144405.2737-1-christian.koenig@amd.com
commit f2f12eb9c3 (parent f4a84e165e)
8 changed files with 22 additions and 21 deletions
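As context for the new parameter, a minimal usage sketch (hypothetical, not part of this patch): a driver whose engine exposes two hardware rings hands the same atomic_t to both schedulers, so drm_sched_pick_best() compares their combined load. All my_* identifiers are illustrative; only drm_sched_init() and its new score argument come from this commit.

	/*
	 * Hypothetical driver init: both rings of one engine share a
	 * single load-balancing score.
	 */
	static atomic_t my_shared_score = ATOMIC_INIT(0);

	static int my_engine_init(struct my_device *mdev)
	{
		unsigned int i;
		int r;

		for (i = 0; i < ARRAY_SIZE(mdev->ring); i++) {
			/* Same score pointer for every ring; passing NULL
			 * instead falls back to the per-scheduler _score. */
			r = drm_sched_init(&mdev->ring[i].sched, &my_sched_ops,
					   my_hw_submission, my_hang_limit,
					   msecs_to_jiffies(my_timeout_ms),
					   &my_shared_score, mdev->ring[i].name);
			if (r)
				return r;
		}
		return 0;
	}
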
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -487,7 +487,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 
 	r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
 			   num_hw_submission, amdgpu_job_hang_limit,
-			   timeout, ring->name);
+			   timeout, NULL, ring->name);
 	if (r) {
 		DRM_ERROR("Failed to create scheduler on ring %s.\n",
 			  ring->name);

--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -190,7 +190,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu)
 
 	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
 			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
-			     msecs_to_jiffies(500), dev_name(gpu->dev));
+			     msecs_to_jiffies(500), NULL, dev_name(gpu->dev));
 	if (ret)
 		return ret;
 

--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -509,7 +509,7 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
 
 	return drm_sched_init(&pipe->base, &lima_sched_ops, 1,
 			      lima_job_hang_limit, msecs_to_jiffies(timeout),
-			      name);
+			      NULL, name);
 }
 
 void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)

--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -627,7 +627,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
 		ret = drm_sched_init(&js->queue[j].sched,
 				     &panfrost_sched_ops,
 				     1, 0, msecs_to_jiffies(JOB_TIMEOUT_MS),
-				     "pan_js");
+				     NULL, "pan_js");
 		if (ret) {
 			dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
 			goto err_sched;

--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -489,7 +489,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 	bool first;
 
 	trace_drm_sched_job(sched_job, entity);
-	atomic_inc(&entity->rq->sched->score);
+	atomic_inc(entity->rq->sched->score);
 	WRITE_ONCE(entity->last_user, current->group_leader);
 	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
 

--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -91,7 +91,7 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
 	if (!list_empty(&entity->list))
 		return;
 	spin_lock(&rq->lock);
-	atomic_inc(&rq->sched->score);
+	atomic_inc(rq->sched->score);
 	list_add_tail(&entity->list, &rq->entities);
 	spin_unlock(&rq->lock);
 }
@@ -110,7 +110,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 	if (list_empty(&entity->list))
 		return;
 	spin_lock(&rq->lock);
-	atomic_dec(&rq->sched->score);
+	atomic_dec(rq->sched->score);
 	list_del_init(&entity->list);
 	if (rq->current_entity == entity)
 		rq->current_entity = NULL;
@@ -173,7 +173,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job)
 	struct drm_gpu_scheduler *sched = s_fence->sched;
 
 	atomic_dec(&sched->hw_rq_count);
-	atomic_dec(&sched->score);
+	atomic_dec(sched->score);
 
 	trace_drm_sched_process_job(s_fence);
 
@@ -732,7 +732,7 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
 			continue;
 		}
 
-		num_score = atomic_read(&sched->score);
+		num_score = atomic_read(sched->score);
 		if (num_score < min_score) {
 			min_score = num_score;
 			picked_sched = sched;
@@ -842,16 +842,15 @@ static int drm_sched_main(void *param)
  * @hw_submission: number of hw submissions that can be in flight
  * @hang_limit: number of times to allow a job to hang before dropping it
  * @timeout: timeout value in jiffies for the scheduler
+ * @score: optional score atomic shared with other schedulers
  * @name: name used for debugging
  *
  * Return 0 on success, otherwise error code.
  */
 int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const struct drm_sched_backend_ops *ops,
-		   unsigned hw_submission,
-		   unsigned hang_limit,
-		   long timeout,
-		   const char *name)
+		   unsigned hw_submission, unsigned hang_limit, long timeout,
+		   atomic_t *score, const char *name)
 {
 	int i, ret;
 	sched->ops = ops;
@@ -859,6 +858,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	sched->name = name;
 	sched->timeout = timeout;
 	sched->hang_limit = hang_limit;
+	sched->score = score ? score : &sched->_score;
 	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
 		drm_sched_rq_init(sched, &sched->sched_rq[i]);
 
@@ -868,7 +868,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
 	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
-	atomic_set(&sched->score, 0);
+	atomic_set(&sched->_score, 0);
 	atomic64_set(&sched->job_id_count, 0);
 
 	/* Each scheduler will run on a seperate kernel thread */

--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -403,7 +403,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 			     &v3d_bin_sched_ops,
 			     hw_jobs_limit, job_hang_limit,
 			     msecs_to_jiffies(hang_limit_ms),
-			     "v3d_bin");
+			     NULL, "v3d_bin");
 	if (ret) {
 		dev_err(v3d->drm.dev, "Failed to create bin scheduler: %d.", ret);
 		return ret;
@@ -413,7 +413,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 			     &v3d_render_sched_ops,
 			     hw_jobs_limit, job_hang_limit,
 			     msecs_to_jiffies(hang_limit_ms),
-			     "v3d_render");
+			     NULL, "v3d_render");
 	if (ret) {
 		dev_err(v3d->drm.dev, "Failed to create render scheduler: %d.",
 			ret);
@@ -425,7 +425,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 			     &v3d_tfu_sched_ops,
 			     hw_jobs_limit, job_hang_limit,
 			     msecs_to_jiffies(hang_limit_ms),
-			     "v3d_tfu");
+			     NULL, "v3d_tfu");
 	if (ret) {
 		dev_err(v3d->drm.dev, "Failed to create TFU scheduler: %d.",
 			ret);
@@ -438,7 +438,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 				     &v3d_csd_sched_ops,
 				     hw_jobs_limit, job_hang_limit,
 				     msecs_to_jiffies(hang_limit_ms),
-				     "v3d_csd");
+				     NULL, "v3d_csd");
 		if (ret) {
 			dev_err(v3d->drm.dev, "Failed to create CSD scheduler: %d.",
 				ret);
@@ -450,7 +450,7 @@ v3d_sched_init(struct v3d_dev *v3d)
 				     &v3d_cache_clean_sched_ops,
 				     hw_jobs_limit, job_hang_limit,
 				     msecs_to_jiffies(hang_limit_ms),
-				     "v3d_cache_clean");
+				     NULL, "v3d_cache_clean");
 		if (ret) {
 			dev_err(v3d->drm.dev, "Failed to create CACHE_CLEAN scheduler: %d.",
 				ret);

--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -297,7 +297,8 @@ struct drm_gpu_scheduler {
 	struct list_head		pending_list;
 	spinlock_t			job_list_lock;
 	int				hang_limit;
-	atomic_t                        score;
+	atomic_t                        *score;
+	atomic_t                        _score;
 	bool				ready;
 	bool				free_guilty;
 };
@@ -305,7 +306,7 @@ struct drm_gpu_scheduler {
 int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const struct drm_sched_backend_ops *ops,
 		   uint32_t hw_submission, unsigned hang_limit, long timeout,
-		   const char *name);
+		   atomic_t *score, const char *name);
 
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
 int drm_sched_job_init(struct drm_sched_job *job,
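
The shape of the change is a pointer with an embedded fallback: drm_sched_init() points sched->score at the caller-supplied atomic_t when one is given, and at the scheduler's own _score otherwise. The accounting sites in sched_entity.c and sched_main.c then merely switch from &sched->score to sched->score, and drivers that pass NULL keep the previous per-scheduler counting.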