2019-06-21 08:07:41 +01:00
|
|
|
/* SPDX-License-Identifier: MIT */
|
|
|
|
/*
|
|
|
|
* Copyright © 2019 Intel Corporation
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __INTEL_GT__
|
|
|
|
#define __INTEL_GT__
|
|
|
|
|
2019-06-21 08:07:44 +01:00
|
|
|
#include "intel_engine_types.h"
|
2019-06-21 08:07:41 +01:00
|
|
|
#include "intel_gt_types.h"
|
2019-07-12 20:29:53 +01:00
|
|
|
#include "intel_reset.h"
|
2019-06-21 08:07:41 +01:00
|
|
|
|
2019-06-21 08:07:42 +01:00
|
|
|
struct drm_i915_private;
|
2020-07-07 17:39:47 -07:00
|
|
|
struct drm_printer;
|
2019-06-21 08:07:42 +01:00
|
|
|
|
2023-08-21 11:06:23 -07:00
|
|
|
/*
 * Check that the GT is a graphics GT and has an IP version within the
 * specified range (inclusive).
 *
 * The BUILD_BUG_ON_ZERO() terms fail the build if the bounds are given
 * out of order or if @from is below IP_VER(2, 0), the lowest graphics IP
 * version this check is meant for; they contribute 0 to the expression
 * otherwise.
 */
#define IS_GFX_GT_IP_RANGE(gt, from, until) ( \
	BUILD_BUG_ON_ZERO((from) < IP_VER(2, 0)) + \
	BUILD_BUG_ON_ZERO((until) < (from)) + \
	((gt)->type != GT_MEDIA && \
	 GRAPHICS_VER_FULL((gt)->i915) >= (from) && \
	 GRAPHICS_VER_FULL((gt)->i915) <= (until)))
|
|
|
|
|
2023-08-21 11:06:25 -07:00
|
|
|
/*
 * Check that the GT is a media GT and has an IP version within the
 * specified range (inclusive).
 *
 * Only usable on platforms with a standalone media design (i.e., IP version 13
 * and higher); the BUILD_BUG_ON_ZERO() terms enforce at compile time that
 * @from is at least IP_VER(13, 0) and that the bounds are properly ordered.
 * Unlike the graphics variant, @gt is also checked for NULL here.
 */
#define IS_MEDIA_GT_IP_RANGE(gt, from, until) ( \
	BUILD_BUG_ON_ZERO((from) < IP_VER(13, 0)) + \
	BUILD_BUG_ON_ZERO((until) < (from)) + \
	((gt) && (gt)->type == GT_MEDIA && \
	 MEDIA_VER_FULL((gt)->i915) >= (from) && \
	 MEDIA_VER_FULL((gt)->i915) <= (until)))
|
|
|
|
|
2023-08-21 11:06:24 -07:00
|
|
|
/*
 * Check that the GT is a graphics GT with a specific IP version and has
 * a stepping in the range [from, until). The lower stepping bound is
 * inclusive, the upper bound is exclusive. The most common use-case of this
 * macro is for checking bounds for workarounds, which usually have a stepping
 * ("from") at which the hardware issue is first present and another stepping
 * ("until") at which a hardware fix is present and the software workaround is
 * no longer necessary. E.g.,
 *
 * IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0)
 * IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B1, STEP_FOREVER)
 *
 * "STEP_FOREVER" can be passed as "until" for workarounds that have no upper
 * stepping bound for the specified IP version.
 *
 * An empty or inverted stepping range is rejected at compile time by the
 * BUILD_BUG_ON_ZERO() term.
 */
#define IS_GFX_GT_IP_STEP(gt, ipver, from, until) ( \
	BUILD_BUG_ON_ZERO((until) <= (from)) + \
	(IS_GFX_GT_IP_RANGE((gt), (ipver), (ipver)) && \
	 IS_GRAPHICS_STEP((gt)->i915, (from), (until))))
|
|
|
|
|
2023-08-21 11:06:25 -07:00
|
|
|
/*
 * Check that the GT is a media GT with a specific IP version and has
 * a stepping in the range [from, until). The lower stepping bound is
 * inclusive, the upper bound is exclusive. The most common use-case of this
 * macro is for checking bounds for workarounds, which usually have a stepping
 * ("from") at which the hardware issue is first present and another stepping
 * ("until") at which a hardware fix is present and the software workaround is
 * no longer necessary. "STEP_FOREVER" can be passed as "until" for
 * workarounds that have no upper stepping bound for the specified IP version.
 *
 * This macro may only be used to match on platforms that have a standalone
 * media design (i.e., media version 13 or higher).
 *
 * An empty or inverted stepping range is rejected at compile time by the
 * BUILD_BUG_ON_ZERO() term.
 */
#define IS_MEDIA_GT_IP_STEP(gt, ipver, from, until) ( \
	BUILD_BUG_ON_ZERO((until) <= (from)) + \
	(IS_MEDIA_GT_IP_RANGE((gt), (ipver), (ipver)) && \
	 IS_MEDIA_STEP((gt)->i915, (from), (until))))
|
|
|
|
|
2019-12-16 10:53:32 -08:00
|
|
|
/*
 * Emit a GEM_TRACE() message prefixed with the device name of the GT's
 * owning i915 device, so traces from multiple devices can be told apart.
 * The gt__ alias ensures @gt is evaluated exactly once; __maybe_unused
 * keeps the compiler quiet when GEM_TRACE() compiles out to nothing.
 */
#define GT_TRACE(gt, fmt, ...) do { \
	const struct intel_gt *gt__ __maybe_unused = (gt); \
	GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
		  ##__VA_ARGS__); \
} while (0)
|
|
|
|
|
2022-03-19 01:39:32 +02:00
|
|
|
static inline bool gt_is_root(struct intel_gt *gt)
|
|
|
|
{
|
|
|
|
return !gt->info.id;
|
|
|
|
}
|
|
|
|
|
2024-03-27 21:05:46 +01:00
|
|
|
bool intel_gt_needs_wa_16018031267(struct intel_gt *gt);
|
2023-09-21 19:24:56 +03:00
|
|
|
bool intel_gt_needs_wa_22016122933(struct intel_gt *gt);
|
2023-08-07 14:19:57 +02:00
|
|
|
|
2024-03-27 21:05:46 +01:00
|
|
|
/*
 * Wa_16018031267: the fast-color blit workaround batch applies only on
 * affected GTs (see intel_gt_needs_wa_16018031267()) and only on the
 * first instance of the copy (blitter) engine class.
 *
 * Note: the macro argument is parenthesized at every use so that
 * expressions such as NEEDS_FASTCOLOR_BLT_WABB(a ? b : c) expand safely.
 */
#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
	intel_gt_needs_wa_16018031267((engine)->gt) && \
	(engine)->class == COPY_ENGINE_CLASS && (engine)->instance == 0)
|
|
|
|
|
2019-07-13 11:00:13 +01:00
|
|
|
/* Map an embedded uC container back to the struct intel_gt holding it. */
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
	return container_of(uc, struct intel_gt, uc);
}
|
|
|
|
|
2019-07-13 11:00:14 +01:00
|
|
|
/* Map an embedded GuC instance (gt->uc.guc) back to its intel_gt. */
static inline struct intel_gt *guc_to_gt(struct intel_guc *guc)
{
	return container_of(guc, struct intel_gt, uc.guc);
}
|
|
|
|
|
|
|
|
/* Map an embedded HuC instance (gt->uc.huc) back to its intel_gt. */
static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
{
	return container_of(huc, struct intel_gt, uc.huc);
}
|
|
|
|
|
2022-12-08 12:05:16 -08:00
|
|
|
/* Map an embedded GSC uC instance (gt->uc.gsc) back to its intel_gt. */
static inline struct intel_gt *gsc_uc_to_gt(struct intel_gsc_uc *gsc_uc)
{
	return container_of(gsc_uc, struct intel_gt, uc.gsc);
}
|
|
|
|
|
2022-04-19 12:33:08 -07:00
|
|
|
/* Map an embedded GSC device (gt->gsc) back to its intel_gt. */
static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
{
	return container_of(gsc, struct intel_gt, gsc);
}
|
|
|
|
|
2023-12-06 19:43:22 +01:00
|
|
|
/* Convenience helper: the i915 device owning the GT that embeds this GuC. */
static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
{
	return guc_to_gt(guc)->i915;
}
|
|
|
|
|
2023-12-29 11:27:31 +01:00
|
|
|
/* Accessor for the GuC instance embedded in a GT (gt->uc.guc). */
static inline struct intel_guc *gt_to_guc(struct intel_gt *gt)
{
	return &gt->uc.guc;
}
|
|
|
|
|
2022-09-06 16:49:26 -07:00
|
|
|
void intel_gt_common_init_early(struct intel_gt *gt);
|
2022-09-06 16:49:33 -07:00
|
|
|
int intel_root_gt_init_early(struct drm_i915_private *i915);
|
2021-12-19 23:25:00 +02:00
|
|
|
int intel_gt_assign_ggtt(struct intel_gt *gt);
|
2020-07-07 17:39:48 -07:00
|
|
|
int intel_gt_init_mmio(struct intel_gt *gt);
|
2019-09-10 15:38:20 +01:00
|
|
|
int __must_check intel_gt_init_hw(struct intel_gt *gt);
|
2019-09-05 14:14:03 +03:00
|
|
|
int intel_gt_init(struct intel_gt *gt);
|
|
|
|
void intel_gt_driver_register(struct intel_gt *gt);
|
|
|
|
|
|
|
|
void intel_gt_driver_unregister(struct intel_gt *gt);
|
|
|
|
void intel_gt_driver_remove(struct intel_gt *gt);
|
|
|
|
void intel_gt_driver_release(struct intel_gt *gt);
|
2022-03-19 01:39:33 +02:00
|
|
|
void intel_gt_driver_late_release_all(struct drm_i915_private *i915);
|
2019-07-12 20:29:53 +01:00
|
|
|
|
2021-07-21 14:50:58 -07:00
|
|
|
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
|
|
|
|
|
2019-06-21 08:07:44 +01:00
|
|
|
void intel_gt_check_and_clear_faults(struct intel_gt *gt);
|
2022-09-10 07:38:43 -07:00
|
|
|
i915_reg_t intel_gt_perf_limit_reasons_reg(struct intel_gt *gt);
|
2019-06-21 08:07:44 +01:00
|
|
|
void intel_gt_clear_error_registers(struct intel_gt *gt,
|
|
|
|
intel_engine_mask_t engine_mask);
|
|
|
|
|
2019-06-21 08:08:01 +01:00
|
|
|
void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
|
2019-06-21 08:08:02 +01:00
|
|
|
void intel_gt_chipset_flush(struct intel_gt *gt);
|
2019-06-21 08:08:01 +01:00
|
|
|
|
2019-07-09 15:33:43 +03:00
|
|
|
/*
 * GGTT offset of @field within the per-GT scratch buffer: the GGTT base
 * offset of gt->scratch plus the field's byte offset within it.
 */
static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
					  enum intel_gt_scratch_field field)
{
	return i915_ggtt_offset(gt->scratch) + field;
}
|
|
|
|
|
2020-07-06 16:41:05 +02:00
|
|
|
/*
 * True if the GT was declared wedged during driver init or fini
 * (I915_WEDGED_ON_INIT / I915_WEDGED_ON_FINI set in gt->reset.flags).
 */
static inline bool intel_gt_has_unrecoverable_error(const struct intel_gt *gt)
{
	return test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags) ||
	       test_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
}
|
|
|
|
|
2020-07-06 16:41:05 +02:00
|
|
|
/* Report whether I915_WEDGED is set in gt->reset.flags. */
static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
{
	/*
	 * Sanity check: an init/fini-time (unrecoverable) wedge must always
	 * be accompanied by the plain I915_WEDGED bit.
	 */
	GEM_BUG_ON(intel_gt_has_unrecoverable_error(gt) &&
		   !test_bit(I915_WEDGED, &gt->reset.flags));

	return unlikely(test_bit(I915_WEDGED, &gt->reset.flags));
}
|
|
|
|
|
2022-03-19 01:39:33 +02:00
|
|
|
int intel_gt_probe_all(struct drm_i915_private *i915);
|
|
|
|
int intel_gt_tiles_init(struct drm_i915_private *i915);
|
|
|
|
|
|
|
|
/*
 * Iterate over every potential GT id in [0, I915_MAX_GT), binding gt__ to
 * i915__->gt[id__] and skipping ids whose slot is NULL (via for_each_if).
 */
#define for_each_gt(gt__, i915__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_MAX_GT; \
	     (id__)++) \
		for_each_if(((gt__) = (i915__)->gt[(id__)]))
|
|
|
|
|
2023-11-02 09:32:48 +00:00
|
|
|
/*
 * Simple iterator over all initialised engines: walks every engine id in
 * [0, I915_NUM_ENGINES) and skips NULL gt->engine[] slots via for_each_if.
 */
#define for_each_engine(engine__, gt__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_NUM_ENGINES; \
	     (id__)++) \
		for_each_if ((engine__) = (gt__)->engine[(id__)])
|
|
|
|
|
|
|
|
/*
 * Iterator over subset of engines selected by mask.  mask__ is first
 * restricted to engines present in gt__->info.engine_mask; tmp__ is
 * scratch storage whose bits are consumed one at a time (__mask_next_bit)
 * as the loop advances, so it is clobbered by the iteration.
 */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
	for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
	     (tmp__) ? \
	     ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
	     0;)
|
|
|
|
|
2020-07-07 17:39:47 -07:00
|
|
|
void intel_gt_info_print(const struct intel_gt_info *info,
|
|
|
|
struct drm_printer *p);
|
|
|
|
|
2021-03-24 12:13:33 +00:00
|
|
|
void intel_gt_watchdog_work(struct work_struct *work);
|
|
|
|
|
2023-08-07 14:19:56 +02:00
|
|
|
enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
|
|
|
|
struct drm_i915_gem_object *obj,
|
|
|
|
bool always_coherent);
|
|
|
|
|
2023-09-26 10:37:37 +02:00
|
|
|
void intel_gt_bind_context_set_ready(struct intel_gt *gt);
|
|
|
|
void intel_gt_bind_context_set_unready(struct intel_gt *gt);
|
|
|
|
bool intel_gt_is_bind_context_ready(struct intel_gt *gt);
|
2024-08-07 10:10:14 +01:00
|
|
|
|
|
|
|
/*
 * Queue the GT's wedge work (gt->wedge) on the high-priority system
 * workqueue; the actual wedging then happens asynchronously in the
 * worker, allowing callers in contexts that cannot wedge synchronously.
 */
static inline void intel_gt_set_wedged_async(struct intel_gt *gt)
{
	queue_work(system_highpri_wq, &gt->wedge);
}
|
|
|
|
|
2019-06-21 08:07:41 +01:00
|
|
|
#endif /* __INTEL_GT__ */
|