	drm/i915: Move intel_engine_mask_t around for use by i915_request_types.h
We want to use intel_engine_mask_t inside i915_request.h, which means
extracting it from the general header file mess and placing it inside a
types.h. A knock-on effect is that the compiler wants to warn about
type-contraction of ALL_ENGINES into intel_engine_mask_t, so prepare
for the worst.

v2: Use intel_engine_mask_t consistently
v3: Move I915_NUM_ENGINES to its natural home at the end of the enum

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190401162641.10963-1-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
parent b01720bfcd
commit 3a891a6267

29 changed files with 191 additions and 152 deletions
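As a quick illustration of the type-contraction warning the commit message prepares for, here is a minimal userspace sketch (illustrative only, not part of the patch): narrowing ~0u into a u8-sized mask type trips -Wconversion-style warnings, while the cast inside the new ALL_ENGINES keeps the contraction explicit and in one place.

/* Illustrative sketch only -- not from the patch. Compile with
 * `gcc -Wconversion` to see the warning the old definition would cause.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint8_t intel_engine_mask_t;	/* as defined in intel_engine_types.h */

#define ALL_ENGINES_OLD (~0u)				/* unsigned int: warns when stored in a u8 */
#define ALL_ENGINES_NEW ((intel_engine_mask_t)~0ul)	/* contraction made explicit by the cast */

int main(void)
{
	intel_engine_mask_t mask = ALL_ENGINES_NEW;

	/* Both spellings would store 0xff, but only the cast says so deliberately. */
	printf("mask = %#x\n", mask);
	return 0;
}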
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -61,6 +61,7 @@ i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
 i915-$(CONFIG_DRM_I915_WERROR) += \
 	test_i915_active_types_standalone.o \
 	test_i915_gem_context_types_standalone.o \
+	test_i915_scheduler_types_standalone.o \
 	test_i915_timeline_types_standalone.o \
 	test_intel_context_types_standalone.o \
 	test_intel_engine_types_standalone.o \

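The *_standalone.o objects above are header self-containment checks: each is built from a stub translation unit that includes exactly one header, so a header that forgets an include or forward declaration breaks the CONFIG_DRM_I915_WERROR build. A sketch of the pattern (the real stub for the new header appears at the end of this diff):

/* test_i915_scheduler_types_standalone.c -- the whole file is one include;
 * if i915_scheduler_types.h is not self-contained, this object fails to build.
 */
#include "i915_scheduler_types.h"
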
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -526,12 +526,13 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
-static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
+static void clean_execlist(struct intel_vgpu *vgpu,
+			   intel_engine_mask_t engine_mask)
 {
-	unsigned int tmp;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
 	struct intel_vgpu_submission *s = &vgpu->submission;
+	intel_engine_mask_t tmp;
 
 	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
 		kfree(s->ring_scan_buffer[engine->id]);
@@ -541,18 +542,18 @@ static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
 }
 
 static void reset_execlist(struct intel_vgpu *vgpu,
-		unsigned long engine_mask)
+			   intel_engine_mask_t engine_mask)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
-	unsigned int tmp;
+	intel_engine_mask_t tmp;
 
 	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
 		init_vgpu_execlist(vgpu, engine->id);
 }
 
 static int init_execlist(struct intel_vgpu *vgpu,
-			 unsigned long engine_mask)
+			 intel_engine_mask_t engine_mask)
 {
 	reset_execlist(vgpu, engine_mask);
 	return 0;

--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -180,6 +180,6 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
 int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
 
 void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
-		unsigned long engine_mask);
+			       intel_engine_mask_t engine_mask);
 
 #endif /*_GVT_EXECLIST_H_*/

--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -144,9 +144,9 @@ enum {
 
 struct intel_vgpu_submission_ops {
 	const char *name;
-	int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
-	void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
-	void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+	int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+	void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+	void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
 };
 
 struct intel_vgpu_submission {
@@ -488,7 +488,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
-				 unsigned int engine_mask);
+				 intel_engine_mask_t engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);

--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -311,7 +311,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 			    void *p_data, unsigned int bytes)
 {
-	unsigned int engine_mask = 0;
+	intel_engine_mask_t engine_mask = 0;
 	u32 data;
 
 	write_vreg(vgpu, offset, p_data, bytes);

--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -838,13 +838,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
 }
 
 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
-				unsigned long engine_mask)
+				intel_engine_mask_t engine_mask)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
 	struct intel_vgpu_workload *pos, *n;
-	unsigned int tmp;
+	intel_engine_mask_t tmp;
 
 	/* free the unsubmited workloads in the queues. */
 	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
@@ -1137,7 +1137,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
  *
  */
 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
-		unsigned long engine_mask)
+				 intel_engine_mask_t engine_mask)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 
@@ -1227,7 +1227,7 @@ out_shadow_ctx:
  *
  */
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
-				     unsigned long engine_mask,
+				     intel_engine_mask_t engine_mask,
 				     unsigned int interface)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;

--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -142,12 +142,12 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
 
 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
-				 unsigned long engine_mask);
+				 intel_engine_mask_t engine_mask);
 
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
 
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
-				     unsigned long engine_mask,
+				     intel_engine_mask_t engine_mask,
 				     unsigned int interface);
 
 extern const struct intel_vgpu_submission_ops
@@ -160,6 +160,6 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
 
 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
-				unsigned long engine_mask);
+				intel_engine_mask_t engine_mask);
 
 #endif

--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -526,11 +526,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
  * GPU engines. For FLR, engine_mask is ignored.
  */
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
-				 unsigned int engine_mask)
+				 intel_engine_mask_t engine_mask)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-	unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
+	intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
 
 	gvt_dbg_core("------------------------------------------\n");
 	gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",

--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2245,7 +2245,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
 	const struct intel_guc *guc = &dev_priv->guc;
 	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
 	struct intel_guc_client *client = guc->execbuf_client;
-	unsigned int tmp;
+	intel_engine_mask_t tmp;
 	int index;
 
 	if (!USES_GUC_SUBMISSION(dev_priv))

--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2505,7 +2505,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define IS_GEN9_LP(dev_priv)	(IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
 #define IS_GEN9_BC(dev_priv)	(IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
 
-#define ALL_ENGINES	(~0u)
 #define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
 
 #define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({		\

--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -73,8 +73,6 @@ struct drm_i915_private;
 #define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
 #endif
 
-#define I915_NUM_ENGINES 8
-
 #define I915_GEM_IDLE_TIMEOUT (HZ / 5)
 
 void i915_gem_park(struct drm_i915_private *i915);

--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -858,9 +858,9 @@ static void cb_retire(struct i915_active *base)
 	kfree(cb);
 }
 
-I915_SELFTEST_DECLARE(static unsigned long context_barrier_inject_fault);
+I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
 static int context_barrier_task(struct i915_gem_context *ctx,
-				unsigned long engines,
+				intel_engine_mask_t engines,
 				int (*emit)(struct i915_request *rq, void *data),
 				void (*task)(void *data),
 				void *data)
@@ -922,7 +922,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 }
 
 int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
-				      unsigned long mask)
+				      intel_engine_mask_t mask)
 {
 	struct intel_engine_cs *engine;
 

--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -142,7 +142,7 @@ void i915_gem_context_close(struct drm_file *file);
 
 int i915_switch_context(struct i915_request *rq);
 int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
-				      unsigned long engine_mask);
+				      intel_engine_mask_t engine_mask);
 
 void i915_gem_context_release(struct kref *ctx_ref);
 struct i915_gem_context *

--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -390,7 +390,7 @@ struct i915_hw_ppgtt {
 	struct i915_address_space vm;
 	struct kref ref;
 
-	unsigned long pd_dirty_engines;
+	intel_engine_mask_t pd_dirty_engines;
 	union {
 		struct i915_pml4 pml4;		/* GEN8+ & 48b PPGTT */
 		struct i915_page_directory_pointer pdp;	/* GEN8+ */

--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1096,7 +1096,7 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
  * It's only a small step better than a random number in its current form.
  */
 static u32 i915_error_generate_code(struct i915_gpu_state *error,
-				    unsigned long engine_mask)
+				    intel_engine_mask_t engine_mask)
 {
 	/*
 	 * IPEHR would be an ideal way to detect errors, as it's the gross
@@ -1641,7 +1641,8 @@ static void capture_reg_state(struct i915_gpu_state *error)
 }
 
 static const char *
-error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
+error_msg(struct i915_gpu_state *error,
+	  intel_engine_mask_t engines, const char *msg)
 {
 	int len;
 	int i;
@@ -1651,7 +1652,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
 			engines &= ~BIT(i);
 
 	len = scnprintf(error->error_msg, sizeof(error->error_msg),
-			"GPU HANG: ecode %d:%lx:0x%08x",
+			"GPU HANG: ecode %d:%x:0x%08x",
 			INTEL_GEN(error->i915), engines,
 			i915_error_generate_code(error, engines));
 	if (engines) {
@@ -1790,7 +1791,7 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
  * to pick up.
  */
 void i915_capture_error_state(struct drm_i915_private *i915,
-			      unsigned long engine_mask,
+			      intel_engine_mask_t engine_mask,
 			      const char *msg)
 {
 	static bool warned;

--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -263,7 +263,7 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
 
 struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
 void i915_capture_error_state(struct drm_i915_private *dev_priv,
-			      unsigned long engine_mask,
+			      intel_engine_mask_t engine_mask,
 			      const char *error_msg);
 
 static inline struct i915_gpu_state *

--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -144,15 +144,15 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
 }
 
 static void i915_stop_engines(struct drm_i915_private *i915,
-			      unsigned int engine_mask)
+			      intel_engine_mask_t engine_mask)
 {
 	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
+	intel_engine_mask_t tmp;
 
 	if (INTEL_GEN(i915) < 3)
 		return;
 
-	for_each_engine_masked(engine, i915, engine_mask, id)
+	for_each_engine_masked(engine, i915, engine_mask, tmp)
 		gen3_stop_engine(engine);
 }
 
@@ -165,7 +165,7 @@ static bool i915_in_reset(struct pci_dev *pdev)
 }
 
 static int i915_do_reset(struct drm_i915_private *i915,
-			 unsigned int engine_mask,
+			 intel_engine_mask_t engine_mask,
 			 unsigned int retry)
 {
 	struct pci_dev *pdev = i915->drm.pdev;
@@ -194,7 +194,7 @@ static bool g4x_reset_complete(struct pci_dev *pdev)
 }
 
 static int g33_do_reset(struct drm_i915_private *i915,
-			unsigned int engine_mask,
+			intel_engine_mask_t engine_mask,
 			unsigned int retry)
 {
 	struct pci_dev *pdev = i915->drm.pdev;
@@ -204,7 +204,7 @@ static int g33_do_reset(struct drm_i915_private *i915,
 }
 
 static int g4x_do_reset(struct drm_i915_private *dev_priv,
-			unsigned int engine_mask,
+			intel_engine_mask_t engine_mask,
 			unsigned int retry)
 {
 	struct pci_dev *pdev = dev_priv->drm.pdev;
@@ -242,7 +242,7 @@ out:
 }
 
 static int ironlake_do_reset(struct drm_i915_private *dev_priv,
-			     unsigned int engine_mask,
+			     intel_engine_mask_t engine_mask,
 			     unsigned int retry)
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;
@@ -303,7 +303,7 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
 }
 
 static int gen6_reset_engines(struct drm_i915_private *i915,
-			      unsigned int engine_mask,
+			      intel_engine_mask_t engine_mask,
 			      unsigned int retry)
 {
 	struct intel_engine_cs *engine;
@@ -319,7 +319,7 @@ static int gen6_reset_engines(struct drm_i915_private *i915,
 	if (engine_mask == ALL_ENGINES) {
 		hw_mask = GEN6_GRDOM_FULL;
 	} else {
-		unsigned int tmp;
+		intel_engine_mask_t tmp;
 
 		hw_mask = 0;
 		for_each_engine_masked(engine, i915, engine_mask, tmp) {
@@ -429,7 +429,7 @@ static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
 }
 
 static int gen11_reset_engines(struct drm_i915_private *i915,
-			       unsigned int engine_mask,
+			       intel_engine_mask_t engine_mask,
 			       unsigned int retry)
 {
 	const u32 hw_engine_mask[] = {
@@ -443,7 +443,7 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
 		[VECS1] = GEN11_GRDOM_VECS2,
 	};
 	struct intel_engine_cs *engine;
-	unsigned int tmp;
+	intel_engine_mask_t tmp;
 	u32 hw_mask;
 	int ret;
 
@@ -496,12 +496,12 @@ static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
 }
 
 static int gen8_reset_engines(struct drm_i915_private *i915,
-			      unsigned int engine_mask,
+			      intel_engine_mask_t engine_mask,
 			      unsigned int retry)
 {
 	struct intel_engine_cs *engine;
 	const bool reset_non_ready = retry >= 1;
-	unsigned int tmp;
+	intel_engine_mask_t tmp;
 	int ret;
 
 	for_each_engine_masked(engine, i915, engine_mask, tmp) {
@@ -537,7 +537,7 @@ skip_reset:
 }
 
 typedef int (*reset_func)(struct drm_i915_private *,
-			  unsigned int engine_mask,
+			  intel_engine_mask_t engine_mask,
 			  unsigned int retry);
 
 static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
@@ -558,7 +558,8 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
 		return NULL;
 }
 
-int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
+int intel_gpu_reset(struct drm_i915_private *i915,
+		    intel_engine_mask_t engine_mask)
 {
 	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
 	reset_func reset;
@@ -692,7 +693,8 @@ static void gt_revoke(struct drm_i915_private *i915)
 	revoke_mmaps(i915);
 }
 
-static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
+static int gt_reset(struct drm_i915_private *i915,
+		    intel_engine_mask_t stalled_mask)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
@@ -951,7 +953,8 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	return result;
 }
 
-static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
+static int do_reset(struct drm_i915_private *i915,
+		    intel_engine_mask_t stalled_mask)
 {
 	int err, i;
 
@@ -986,7 +989,7 @@ static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
  *   - re-init display
  */
 void i915_reset(struct drm_i915_private *i915,
-		unsigned int stalled_mask,
+		intel_engine_mask_t stalled_mask,
 		const char *reason)
 {
 	struct i915_gpu_error *error = &i915->gpu_error;
@@ -1233,14 +1236,14 @@ void i915_clear_error_registers(struct drm_i915_private *dev_priv)
  * of a ring dump etc.).
  */
 void i915_handle_error(struct drm_i915_private *i915,
-		       u32 engine_mask,
+		       intel_engine_mask_t engine_mask,
 		       unsigned long flags,
 		       const char *fmt, ...)
 {
 	struct i915_gpu_error *error = &i915->gpu_error;
 	struct intel_engine_cs *engine;
 	intel_wakeref_t wakeref;
-	unsigned int tmp;
+	intel_engine_mask_t tmp;
 	char error_msg[80];
 	char *msg = NULL;
 

--- a/drivers/gpu/drm/i915/i915_reset.h
+++ b/drivers/gpu/drm/i915/i915_reset.h
@@ -11,13 +11,15 @@
 #include <linux/types.h>
 #include <linux/srcu.h>
 
+#include "intel_engine_types.h"
+
 struct drm_i915_private;
 struct intel_engine_cs;
 struct intel_guc;
 
 __printf(4, 5)
 void i915_handle_error(struct drm_i915_private *i915,
-		       u32 engine_mask,
+		       intel_engine_mask_t engine_mask,
 		       unsigned long flags,
 		       const char *fmt, ...);
 #define I915_ERROR_CAPTURE BIT(0)
@@ -25,7 +27,7 @@ void i915_handle_error(struct drm_i915_private *i915,
 void i915_clear_error_registers(struct drm_i915_private *i915);
 
 void i915_reset(struct drm_i915_private *i915,
-		unsigned int stalled_mask,
+		intel_engine_mask_t stalled_mask,
 		const char *reason);
 int i915_reset_engine(struct intel_engine_cs *engine,
 		      const char *reason);
@@ -41,7 +43,8 @@ int i915_terminally_wedged(struct drm_i915_private *i915);
 bool intel_has_gpu_reset(struct drm_i915_private *i915);
 bool intel_has_reset_engine(struct drm_i915_private *i915);
 
-int intel_gpu_reset(struct drm_i915_private *i915, u32 engine_mask);
+int intel_gpu_reset(struct drm_i915_private *i915,
+		    intel_engine_mask_t engine_mask);
 
 int intel_reset_guc(struct drm_i915_private *i915);
 

--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -8,92 +8,10 @@
 #define _I915_SCHEDULER_H_
 
 #include <linux/bitops.h>
+#include <linux/list.h>
 #include <linux/kernel.h>
 
-#include <uapi/drm/i915_drm.h>
-
-struct drm_i915_private;
-struct i915_request;
-struct intel_engine_cs;
-
-enum {
-	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
-	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
-	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
-
-	I915_PRIORITY_INVALID = INT_MIN
-};
-
-#define I915_USER_PRIORITY_SHIFT 3
-#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
-
-#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
-#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
-
-#define I915_PRIORITY_WAIT		((u8)BIT(0))
-#define I915_PRIORITY_NEWCLIENT		((u8)BIT(1))
-#define I915_PRIORITY_NOSEMAPHORE	((u8)BIT(2))
-
-#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
-
-struct i915_sched_attr {
-	/**
-	 * @priority: execution and service priority
-	 *
-	 * All clients are equal, but some are more equal than others!
-	 *
-	 * Requests from a context with a greater (more positive) value of
-	 * @priority will be executed before those with a lower @priority
-	 * value, forming a simple QoS.
-	 *
-	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
-	 */
-	int priority;
-};
-
-/*
- * "People assume that time is a strict progression of cause to effect, but
- * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
- * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
- *
- * Requests exist in a complex web of interdependencies. Each request
- * has to wait for some other request to complete before it is ready to be run
- * (e.g. we have to wait until the pixels have been rendering into a texture
- * before we can copy from it). We track the readiness of a request in terms
- * of fences, but we also need to keep the dependency tree for the lifetime
- * of the request (beyond the life of an individual fence). We use the tree
- * at various points to reorder the requests whilst keeping the requests
- * in order with respect to their various dependencies.
- *
- * There is no active component to the "scheduler". As we know the dependency
- * DAG of each request, we are able to insert it into a sorted queue when it
- * is ready, and are able to reorder its portion of the graph to accommodate
- * dynamic priority changes.
- */
-struct i915_sched_node {
-	struct list_head signalers_list; /* those before us, we depend upon */
-	struct list_head waiters_list; /* those after us, they depend upon us */
-	struct list_head link;
-	struct i915_sched_attr attr;
-	unsigned int flags;
-#define I915_SCHED_HAS_SEMAPHORE	BIT(0)
-};
-
-struct i915_dependency {
-	struct i915_sched_node *signaler;
-	struct list_head signal_link;
-	struct list_head wait_link;
-	struct list_head dfs_link;
-	unsigned long flags;
-#define I915_DEPENDENCY_ALLOC BIT(0)
-};
-
-struct i915_priolist {
-	struct list_head requests[I915_PRIORITY_COUNT];
-	struct rb_node node;
-	unsigned long used;
-	int priority;
-};
+#include "i915_scheduler_types.h"
 
 #define priolist_for_each_request(it, plist, idx) \
 	for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \

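The split above follows the driver's types-header pattern: definitions that other headers need to embed move into a minimal *_types.h, and the function-facing header keeps its helpers and simply includes it. A generic sketch of the idea, with hypothetical names:

/* foo_types.h -- hypothetical illustration: plain type definitions only,
 * so other headers can embed struct foo without pulling in helper code.
 */
#ifndef FOO_TYPES_H
#define FOO_TYPES_H

#include <linux/list.h>

struct foo {
	struct list_head link;
	int priority;
};

#endif /* FOO_TYPES_H */

/* foo.h -- the function-facing header layers helpers on top, as
 * i915_scheduler.h now does with i915_scheduler_types.h.
 */
#ifndef FOO_H
#define FOO_H

#include "foo_types.h"

void foo_init(struct foo *f);

#endif /* FOO_H */
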
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h (new file, 98 lines)
@@ -0,0 +1,98 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_SCHEDULER_TYPES_H_
+#define _I915_SCHEDULER_TYPES_H_
+
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
+#include <uapi/drm/i915_drm.h>
+
+struct drm_i915_private;
+struct i915_request;
+struct intel_engine_cs;
+
+enum {
+	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
+	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
+	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+	I915_PRIORITY_INVALID = INT_MIN
+};
+
+#define I915_USER_PRIORITY_SHIFT 3
+#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
+
+#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
+#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
+
+#define I915_PRIORITY_WAIT		((u8)BIT(0))
+#define I915_PRIORITY_NEWCLIENT		((u8)BIT(1))
+#define I915_PRIORITY_NOSEMAPHORE	((u8)BIT(2))
+
+#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
+
+struct i915_sched_attr {
+	/**
+	 * @priority: execution and service priority
+	 *
+	 * All clients are equal, but some are more equal than others!
+	 *
+	 * Requests from a context with a greater (more positive) value of
+	 * @priority will be executed before those with a lower @priority
+	 * value, forming a simple QoS.
+	 *
+	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
+	 */
+	int priority;
+};
+
+/*
+ * "People assume that time is a strict progression of cause to effect, but
+ * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
+ * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
+ *
+ * Requests exist in a complex web of interdependencies. Each request
+ * has to wait for some other request to complete before it is ready to be run
+ * (e.g. we have to wait until the pixels have been rendering into a texture
+ * before we can copy from it). We track the readiness of a request in terms
+ * of fences, but we also need to keep the dependency tree for the lifetime
+ * of the request (beyond the life of an individual fence). We use the tree
+ * at various points to reorder the requests whilst keeping the requests
+ * in order with respect to their various dependencies.
+ *
+ * There is no active component to the "scheduler". As we know the dependency
+ * DAG of each request, we are able to insert it into a sorted queue when it
+ * is ready, and are able to reorder its portion of the graph to accommodate
+ * dynamic priority changes.
+ */
+struct i915_sched_node {
+	struct list_head signalers_list; /* those before us, we depend upon */
+	struct list_head waiters_list; /* those after us, they depend upon us */
+	struct list_head link;
+	struct i915_sched_attr attr;
+	unsigned int flags;
+#define I915_SCHED_HAS_SEMAPHORE	BIT(0)
+};
+
+struct i915_dependency {
+	struct i915_sched_node *signaler;
+	struct list_head signal_link;
+	struct list_head wait_link;
+	struct list_head dfs_link;
+	unsigned long flags;
+#define I915_DEPENDENCY_ALLOC BIT(0)
+};
+
+struct i915_priolist {
+	struct list_head requests[I915_PRIORITY_COUNT];
+	struct rb_node node;
+	unsigned long used;
+	int priority;
+};
+
+#endif /* _I915_SCHEDULER_TYPES_H_ */

--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -27,6 +27,7 @@
 
 #include <linux/lockdep.h>
 
+#include "i915_active.h"
 #include "i915_syncmap.h"
 #include "i915_timeline_types.h"
 

--- a/drivers/gpu/drm/i915/i915_timeline_types.h
+++ b/drivers/gpu/drm/i915/i915_timeline_types.h
@@ -9,9 +9,10 @@
 
 #include <linux/list.h>
 #include <linux/kref.h>
+#include <linux/mutex.h>
 #include <linux/types.h>
 
-#include "i915_active.h"
+#include "i915_active_types.h"
 
 struct drm_i915_private;
 struct i915_vma;

--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -27,6 +27,7 @@
 
 #include <uapi/drm/i915_drm.h>
 
+#include "intel_engine_types.h"
 #include "intel_display.h"
 
 struct drm_printer;
@@ -165,8 +166,6 @@ struct sseu_dev_info {
 	u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
 };
 
-typedef u8 intel_engine_mask_t;
-
 struct intel_device_info {
 	u16 gen_mask;
 

--- a/drivers/gpu/drm/i915/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/intel_engine_types.h
@@ -13,8 +13,10 @@
 #include <linux/list.h>
 #include <linux/types.h>
 
+#include "i915_gem.h"
+#include "i915_scheduler_types.h"
 #include "i915_selftest.h"
-#include "intel_device_info.h"
+#include "i915_timeline_types.h"
 #include "intel_workarounds_types.h"
 
 #include "i915_gem_batch_pool.h"
@@ -25,12 +27,16 @@
 
 #define I915_CMD_HASH_ORDER 9
 
+struct dma_fence;
 struct drm_i915_reg_table;
 struct i915_gem_context;
 struct i915_request;
 struct i915_sched_attr;
 struct intel_uncore;
 
+typedef u8 intel_engine_mask_t;
+#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
+
 struct intel_hw_status_page {
 	struct i915_vma *vma;
 	u32 *addr;
@@ -105,8 +111,9 @@ enum intel_engine_id {
 	VCS3,
 #define _VCS(n) (VCS0 + (n))
 	VECS0,
-	VECS1
+	VECS1,
 #define _VECS(n) (VECS0 + (n))
+	I915_NUM_ENGINES
 };
 
 struct st_preempt_hang {

--- a/drivers/gpu/drm/i915/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/intel_guc_submission.h
@@ -29,6 +29,7 @@
 
 #include "i915_gem.h"
 #include "i915_selftest.h"
+#include "intel_engine_types.h"
 
 struct drm_i915_private;
 

--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -221,8 +221,8 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915,
 				   unsigned int stuck)
 {
 	struct intel_engine_cs *engine;
+	intel_engine_mask_t tmp;
 	char msg[80];
-	unsigned int tmp;
 	int len;
 
 	/* If some rings hung but others were still busy, only

--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -1594,10 +1594,10 @@ out_unlock:
 }
 
 static __maybe_unused const char *
-__engine_name(struct drm_i915_private *i915, unsigned int engines)
+__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
 {
 	struct intel_engine_cs *engine;
-	unsigned int tmp;
+	intel_engine_mask_t tmp;
 
 	if (engines == ALL_ENGINES)
 		return "all";
@@ -1610,10 +1610,10 @@ __engine_name(struct drm_i915_private *i915, unsigned int engines)
 
 static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
 					  struct i915_gem_context *ctx,
-					  unsigned int engines)
+					  intel_engine_mask_t engines)
 {
 	struct intel_engine_cs *engine;
-	unsigned int tmp;
+	intel_engine_mask_t tmp;
 	int pass;
 
 	GEM_TRACE("Testing %s\n", __engine_name(i915, engines));

--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -1124,7 +1124,8 @@ static int igt_reset_engines(void *arg)
 	return 0;
 }
 
-static u32 fake_hangcheck(struct drm_i915_private *i915, u32 mask)
+static u32 fake_hangcheck(struct drm_i915_private *i915,
+			  intel_engine_mask_t mask)
 {
 	u32 count = i915_reset_count(&i915->gpu_error);
 

--- /dev/null
+++ b/drivers/gpu/drm/i915/test_i915_scheduler_types_standalone.c (new file)
@@ -0,0 +1,7 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_scheduler_types.h"
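
For reference, a hypothetical caller showing the shape the patch gives every call site: both the mask parameter and the iterator temporary use the fixed-width intel_engine_mask_t rather than a loose unsigned long or u32.

/* Hypothetical example, mirroring the converted call sites above;
 * frob_engines is not a real driver function.
 */
static void frob_engines(struct drm_i915_private *i915,
			 intel_engine_mask_t engine_mask)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;

	for_each_engine_masked(engine, i915, engine_mask, tmp) {
		/* per-engine work here */
	}
}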