Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-04 08:17:46 +00:00)
drm/amdgpu: rework sched_list generation
Generate each HW IP's sched_list in amdgpu_ring_init() instead of in
amdgpu_ctx.c. This makes amdgpu_ctx_init_compute_sched(),
ring.has_high_prio and amdgpu_ctx_init_sched() unnecessary. This patch
also stores the sched_list for all HW IPs in one big array in struct
amdgpu_device, which makes amdgpu_ctx_init_entity() much leaner.

v2: fix a coding style issue
    do not use drm hw_ip const to populate amdgpu_ring_type enum
v3: remove ctx reference and move sched array and num_sched to a struct
    use num_scheds to detect uninitialized scheduler list
v4: use array_index_nospec for user space controlled variables
    fix possible checkpatch.pl warnings

Signed-off-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit 1c6d567bdf
parent 07e14845d1
35 changed files with 144 additions and 197 deletions
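Before the per-file hunks, a standalone sketch of the scheme this patch
introduces may help. This is not the driver code: drm_gpu_scheduler is
stubbed out, the constant values are illustrative, and register_ring()
only stands in for what amdgpu_ring_init() does in the hunks below.

	/* sketch: one scheduler list per (HW IP, hardware priority) pair,
	 * filled in as rings are initialized, indexed by contexts later */
	#include <stdio.h>

	#define AMDGPU_HW_IP_NUM	9	/* GFX, COMPUTE, DMA, UVD, ... */
	#define AMDGPU_RING_PRIO_MAX	3	/* illustrative */
	#define AMDGPU_MAX_HWIP_RINGS	8

	struct drm_gpu_scheduler { const char *name; };	/* stub */

	struct amdgpu_sched {
		unsigned int num_scheds;
		struct drm_gpu_scheduler *sched[AMDGPU_MAX_HWIP_RINGS];
	};

	static struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];

	/* what amdgpu_ring_init() now does as each ring comes up */
	static void register_ring(unsigned int hw_ip, unsigned int hw_prio,
				  struct drm_gpu_scheduler *sched)
	{
		struct amdgpu_sched *s = &gpu_sched[hw_ip][hw_prio];

		s->sched[s->num_scheds++] = sched;
	}

	int main(void)
	{
		struct drm_gpu_scheduler gfx0 = { "gfx_0" }, comp0 = { "comp_0" };

		register_ring(0, 1, &gfx0);	/* GFX ring, default prio */
		register_ring(1, 1, &comp0);	/* compute ring, default prio */

		/* a context now indexes the table instead of a switch() */
		printf("%s\n", gpu_sched[1][1].sched[0]->name);
		return 0;
	}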
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -853,6 +853,7 @@ struct amdgpu_device {
 	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
 	bool				ib_pool_ready;
 	struct amdgpu_sa_manager	ring_tmp_bo[AMDGPU_IB_POOL_MAX];
+	struct amdgpu_sched		gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
 
 	/* interrupts */
 	struct amdgpu_irq		irq;
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -26,6 +26,7 @@
 #include "amdgpu.h"
 #include "amdgpu_sched.h"
 #include "amdgpu_ras.h"
+#include <linux/nospec.h>
 
 #define to_amdgpu_ctx_entity(e)	\
 	container_of((e), struct amdgpu_ctx_entity, entity)
@@ -72,13 +73,30 @@ static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sch
 	}
 }
 
-static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
+static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
+						enum drm_sched_priority prio,
+						u32 hw_ip)
+{
+	unsigned int hw_prio;
+
+	hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
+			amdgpu_ctx_sched_prio_to_compute_prio(prio) :
+			AMDGPU_RING_PRIO_DEFAULT;
+	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
+	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
+		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+
+	return hw_prio;
+}
+
+static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
+				  const u32 ring)
 {
 	struct amdgpu_device *adev = ctx->adev;
 	struct amdgpu_ctx_entity *entity;
 	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
 	unsigned num_scheds = 0;
-	enum gfx_pipe_priority hw_prio;
+	unsigned int hw_prio;
 	enum drm_sched_priority priority;
 	int r;
 
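The hw_ip index handled here ultimately arrives from user space through
the DRM ioctl interface, which is why v4 of the patch clamps it with
array_index_nospec() before the table lookup. A minimal kernel-context
sketch of the pattern (pick_entry() is a hypothetical helper, not part
of the patch; this does not compile outside the kernel tree):

	#include <linux/nospec.h>

	/* The range check alone is not enough: the CPU may still execute
	 * the dependent load speculatively with an out-of-range idx
	 * (Spectre v1). array_index_nospec() clamps idx to [0, size) in a
	 * way speculation cannot bypass. */
	static struct amdgpu_sched *pick_entry(struct amdgpu_device *adev, u32 idx)
	{
		if (idx >= AMDGPU_HW_IP_NUM)
			return NULL;

		idx = array_index_nospec(idx, AMDGPU_HW_IP_NUM);
		return &adev->gpu_sched[idx][AMDGPU_RING_PRIO_DEFAULT];
	}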
@@ -90,52 +108,16 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const
 	entity->sequence = 1;
 	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
 			ctx->init_priority : ctx->override_priority;
-	switch (hw_ip) {
-	case AMDGPU_HW_IP_GFX:
-		sched = &adev->gfx.gfx_ring[0].sched;
+	hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);
+
+	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
+	scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
+	num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+
+	if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) {
+		sched = drm_sched_pick_best(scheds, num_scheds);
 		scheds = &sched;
 		num_scheds = 1;
-		break;
-	case AMDGPU_HW_IP_COMPUTE:
-		hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
-		scheds = adev->gfx.compute_prio_sched[hw_prio];
-		num_scheds = adev->gfx.num_compute_sched[hw_prio];
-		break;
-	case AMDGPU_HW_IP_DMA:
-		scheds = adev->sdma.sdma_sched;
-		num_scheds = adev->sdma.num_sdma_sched;
-		break;
-	case AMDGPU_HW_IP_UVD:
-		sched = &adev->uvd.inst[0].ring.sched;
-		scheds = &sched;
-		num_scheds = 1;
-		break;
-	case AMDGPU_HW_IP_VCE:
-		sched = &adev->vce.ring[0].sched;
-		scheds = &sched;
-		num_scheds = 1;
-		break;
-	case AMDGPU_HW_IP_UVD_ENC:
-		sched = &adev->uvd.inst[0].ring_enc[0].sched;
-		scheds = &sched;
-		num_scheds = 1;
-		break;
-	case AMDGPU_HW_IP_VCN_DEC:
-		sched = drm_sched_pick_best(adev->vcn.vcn_dec_sched,
-					    adev->vcn.num_vcn_dec_sched);
-		scheds = &sched;
-		num_scheds = 1;
-		break;
-	case AMDGPU_HW_IP_VCN_ENC:
-		sched = drm_sched_pick_best(adev->vcn.vcn_enc_sched,
-					    adev->vcn.num_vcn_enc_sched);
-		scheds = &sched;
-		num_scheds = 1;
-		break;
-	case AMDGPU_HW_IP_VCN_JPEG:
-		scheds = adev->jpeg.jpeg_sched;
-		num_scheds = adev->jpeg.num_jpeg_sched;
-		break;
 	}
 
 	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
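One behavioural detail survives the switch removal: VCN encode/decode
entities must not load-balance across VCN instances, so instead of
handing the whole list to drm_sched_entity_init(), the code asks
drm_sched_pick_best() for the least-loaded scheduler once and passes a
one-element list. Restated from the hunk above (kernel context):

	if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) {
		/* pin the entity to a single VCN instance, chosen by
		 * current scheduler load rather than by fixed index */
		sched = drm_sched_pick_best(scheds, num_scheds);
		scheds = &sched;
		num_scheds = 1;
	}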
@@ -178,7 +160,6 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
 
 	return 0;
-
 }
 
 static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
@@ -525,7 +506,7 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
 				       enum drm_sched_priority priority)
 {
 	struct amdgpu_device *adev = ctx->adev;
-	enum gfx_pipe_priority hw_prio;
+	unsigned int hw_prio;
 	struct drm_gpu_scheduler **scheds = NULL;
 	unsigned num_scheds;
 
@@ -534,9 +515,11 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
 
 	/* set hw priority */
 	if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
-		hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
-		scheds = adev->gfx.compute_prio_sched[hw_prio];
-		num_scheds = adev->gfx.num_compute_sched[hw_prio];
+		hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
+						      AMDGPU_HW_IP_COMPUTE);
+		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
+		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
+		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
 		drm_sched_entity_modify_sched(&aentity->entity, scheds,
 					      num_scheds);
 	}
@@ -665,78 +648,3 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
 	idr_destroy(&mgr->ctx_handles);
 	mutex_destroy(&mgr->lock);
 }
-
-
-static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
-{
-	int num_compute_sched_normal = 0;
-	int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
-	int i;
-
-	/* use one drm sched array, gfx.compute_sched to store both high and
-	 * normal priority drm compute schedulers */
-	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		if (!adev->gfx.compute_ring[i].has_high_prio)
-			adev->gfx.compute_sched[num_compute_sched_normal++] =
-				&adev->gfx.compute_ring[i].sched;
-		else
-			adev->gfx.compute_sched[num_compute_sched_high--] =
-				&adev->gfx.compute_ring[i].sched;
-	}
-
-	/* compute ring only has two priority for now */
-	i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
-	adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
-	adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
-
-	i = AMDGPU_GFX_PIPE_PRIO_HIGH;
-	if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
-		/* When compute has no high priority rings then use */
-		/* normal priority sched array */
-		adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
-		adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
-	} else {
-		adev->gfx.compute_prio_sched[i] =
-			&adev->gfx.compute_sched[num_compute_sched_high - 1];
-		adev->gfx.num_compute_sched[i] =
-			adev->gfx.num_compute_rings - num_compute_sched_normal;
-	}
-}
-
-void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
-{
-	int i, j;
-
-	amdgpu_ctx_init_compute_sched(adev);
-	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
-		adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
-		adev->gfx.num_gfx_sched++;
-	}
-
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
-		adev->sdma.num_sdma_sched++;
-	}
-
-	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-		if (adev->vcn.harvest_config & (1 << i))
-			continue;
-		adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
-			&adev->vcn.inst[i].ring_dec.sched;
-	}
-
-	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-		if (adev->vcn.harvest_config & (1 << i))
-			continue;
-		for (j = 0; j < adev->vcn.num_enc_rings; ++j)
-			adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
-				&adev->vcn.inst[i].ring_enc[j].sched;
-	}
-
-	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
-		if (adev->jpeg.harvest_config & (1 << i))
-			continue;
-		adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
-			&adev->jpeg.inst[i].ring_dec.sched;
-	}
-}
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -88,7 +88,4 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 
-void amdgpu_ctx_init_sched(struct amdgpu_device *adev);
-
-
 #endif
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3210,8 +3210,6 @@ fence_driver_init:
 			adev->gfx.config.max_cu_per_sh,
 			adev->gfx.cu_info.number);
 
-	amdgpu_ctx_init_sched(adev);
-
 	adev->accel_working = true;
 
 	amdgpu_vm_check_compute_bug(adev);
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -320,7 +320,8 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
 	ring->eop_gpu_addr = kiq->eop_gpu_addr;
 	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 	r = amdgpu_ring_init(adev, ring, 1024,
-			     irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
+			     irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
+			     AMDGPU_RING_PRIO_DEFAULT);
 	if (r)
 		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
 
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -286,13 +286,8 @@ struct amdgpu_gfx {
 	bool				me_fw_write_wait;
 	bool				cp_fw_write_wait;
 	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
-	struct drm_gpu_scheduler	*gfx_sched[AMDGPU_MAX_GFX_RINGS];
-	uint32_t			num_gfx_sched;
 	unsigned			num_gfx_rings;
 	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
-	struct drm_gpu_scheduler	**compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
-	struct drm_gpu_scheduler	*compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
-	uint32_t			num_compute_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
 	unsigned			num_compute_rings;
 	struct amdgpu_irq_src		eop_irq;
 	struct amdgpu_irq_src		priv_reg_irq;
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -43,8 +43,6 @@ struct amdgpu_jpeg {
 	uint8_t	num_jpeg_inst;
 	struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
 	struct amdgpu_jpeg_reg internal;
-	struct drm_gpu_scheduler *jpeg_sched[AMDGPU_MAX_JPEG_INSTANCES];
-	uint32_t num_jpeg_sched;
 	unsigned harvest_config;
 	struct delayed_work idle_work;
 	enum amd_powergating_state cur_state;
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -162,11 +162,13 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
 * Returns 0 on success, error on failure.
 */
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-		     unsigned max_dw, struct amdgpu_irq_src *irq_src,
-		     unsigned irq_type)
+		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
+		     unsigned int irq_type, unsigned int hw_prio)
 {
 	int r, i;
 	int sched_hw_submission = amdgpu_sched_hw_submission;
+	u32 *num_sched;
+	u32 hw_ip;
 
 	/* Set the hw submission limit higher for KIQ because
 	 * it's used for a number of gfx/compute tasks by both
@@ -258,6 +260,14 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 	ring->priority = DRM_SCHED_PRIORITY_NORMAL;
 	mutex_init(&ring->priority_mutex);
 
+	if (ring->funcs->type >= AMDGPU_RING_TYPE_GFX &&
+	    ring->funcs->type <= AMDGPU_RING_TYPE_VCN_JPEG) {
+		hw_ip = ring->funcs->type;
+		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
+			&ring->sched;
+	}
+
 	for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
 		atomic_set(&ring->num_jobs[i], 0);
 
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -30,11 +30,15 @@
 
 /* max number of rings */
 #define AMDGPU_MAX_RINGS		28
+#define AMDGPU_MAX_HWIP_RINGS		8
 #define AMDGPU_MAX_GFX_RINGS		2
 #define AMDGPU_MAX_COMPUTE_RINGS	8
 #define AMDGPU_MAX_VCE_RINGS		3
 #define AMDGPU_MAX_UVD_ENC_RINGS	2
 
+#define AMDGPU_RING_PRIO_DEFAULT	1
+#define AMDGPU_RING_PRIO_MAX		AMDGPU_GFX_PIPE_PRIO_MAX
+
 /* some special values for the owner field */
 #define AMDGPU_FENCE_OWNER_UNDEFINED	((void *)0ul)
 #define AMDGPU_FENCE_OWNER_VM		((void *)1ul)
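AMDGPU_MAX_HWIP_RINGS is what sizes each per-(IP, priority) scheduler
list in struct amdgpu_sched below; it must be at least the largest ring
count a single HW IP can register, which here is the eight compute
rings. A hypothetical compile-time guard for that invariant (not in the
patch) could read:

	#include <linux/build_bug.h>

	static inline void amdgpu_sched_size_check(void)
	{
		/* every ring of one IP type must fit in one list */
		BUILD_BUG_ON(AMDGPU_MAX_COMPUTE_RINGS > AMDGPU_MAX_HWIP_RINGS);
	}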
@@ -65,6 +69,11 @@ struct amdgpu_ib;
 struct amdgpu_cs_parser;
 struct amdgpu_job;
 
+struct amdgpu_sched {
+	u32				num_scheds;
+	struct drm_gpu_scheduler	*sched[AMDGPU_MAX_HWIP_RINGS];
+};
+
 /*
 * Fences.
 */
@@ -219,7 +228,6 @@ struct amdgpu_ring {
 	struct mutex		priority_mutex;
 	/* protected by priority_mutex */
 	int			priority;
-	bool			has_high_prio;
 
 #if defined(CONFIG_DEBUG_FS)
 	struct dentry *ent;
@@ -257,8 +265,8 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-		     unsigned ring_size, struct amdgpu_irq_src *irq_src,
-		     unsigned irq_type);
+		     unsigned int ring_size, struct amdgpu_irq_src *irq_src,
+		     unsigned int irq_type, unsigned int prio);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
 void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
 						uint32_t reg0, uint32_t val0,
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -61,8 +61,6 @@ struct amdgpu_sdma_ras_funcs {
 
 struct amdgpu_sdma {
 	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
-	struct drm_gpu_scheduler    *sdma_sched[AMDGPU_MAX_SDMA_INSTANCES];
-	uint32_t		    num_sdma_sched;
 	struct amdgpu_irq_src	trap_irq;
 	struct amdgpu_irq_src	illegal_inst_irq;
 	struct amdgpu_irq_src	ecc_irq;
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -207,10 +207,6 @@ struct amdgpu_vcn {
 	uint8_t	num_vcn_inst;
 	struct amdgpu_vcn_inst	 inst[AMDGPU_MAX_VCN_INSTANCES];
 	struct amdgpu_vcn_reg	 internal;
-	struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS];
-	struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES];
-	uint32_t		 num_vcn_enc_sched;
-	uint32_t		 num_vcn_dec_sched;
 	struct mutex		 vcn_pg_lock;
 	atomic_t		 total_submission_cnt;
 
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -979,7 +979,8 @@ static int cik_sdma_sw_init(void *handle)
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
 				     AMDGPU_SDMA_IRQ_INSTANCE0 :
-				     AMDGPU_SDMA_IRQ_INSTANCE1);
+				     AMDGPU_SDMA_IRQ_INSTANCE1,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 	}
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -1299,7 +1299,8 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
 
 	irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
 	r = amdgpu_ring_init(adev, ring, 1024,
-			     &adev->gfx.eop_irq, irq_type);
+			     &adev->gfx.eop_irq, irq_type,
+			     AMDGPU_RING_PRIO_DEFAULT);
 	if (r)
 		return r;
 	return 0;
@@ -1310,7 +1311,8 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 {
 	int r;
 	unsigned irq_type;
-	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+	struct amdgpu_ring *ring;
+	unsigned int hw_prio;
 
 	ring = &adev->gfx.compute_ring[ring_id];
 
@@ -1329,10 +1331,11 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
 		+ ring->pipe;
 
+	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
 	/* type-2 packets are deprecated on MEC, use type-3 instead */
 	r = amdgpu_ring_init(adev, ring, 1024,
-			     &adev->gfx.eop_irq, irq_type);
+			     &adev->gfx.eop_irq, irq_type, hw_prio);
 	if (r)
 		return r;
 
@@ -3261,11 +3264,8 @@ static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct
 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
 		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
-			ring->has_high_prio = true;
 			mqd->cp_hqd_queue_priority =
 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
-		} else {
-			ring->has_high_prio = false;
 		}
 	}
 }
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3110,7 +3110,9 @@ static int gfx_v6_0_sw_init(void *handle)
 		ring->ring_obj = NULL;
 		sprintf(ring->name, "gfx");
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+				     &adev->gfx.eop_irq,
+				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 	}
@@ -3132,7 +3134,8 @@ static int gfx_v6_0_sw_init(void *handle)
 		sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     &adev->gfx.eop_irq, irq_type);
+				     &adev->gfx.eop_irq, irq_type,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 	}
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4433,7 +4433,8 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 
 	/* type-2 packets are deprecated on MEC, use type-3 instead */
 	r = amdgpu_ring_init(adev, ring, 1024,
-			     &adev->gfx.eop_irq, irq_type);
+			     &adev->gfx.eop_irq, irq_type,
+			     AMDGPU_RING_PRIO_DEFAULT);
 	if (r)
 		return r;
 
@@ -4505,7 +4506,9 @@ static int gfx_v7_0_sw_init(void *handle)
 		ring->ring_obj = NULL;
 		sprintf(ring->name, "gfx");
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+				     &adev->gfx.eop_irq,
+				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 	}
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1894,6 +1894,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 	int r;
 	unsigned irq_type;
 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+	unsigned int hw_prio;
 
 	ring = &adev->gfx.compute_ring[ring_id];
 
@@ -1913,9 +1914,11 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 			+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
 			+ ring->pipe;
 
+	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
 	/* type-2 packets are deprecated on MEC, use type-3 instead */
 	r = amdgpu_ring_init(adev, ring, 1024,
-			     &adev->gfx.eop_irq, irq_type);
+			     &adev->gfx.eop_irq, irq_type, hw_prio);
 	if (r)
 		return r;
 
@@ -2019,7 +2022,8 @@ static int gfx_v8_0_sw_init(void *handle)
 	}
 
 	r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
-			     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+			     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+			     AMDGPU_RING_PRIO_DEFAULT);
 	if (r)
 		return r;
 	}
@@ -4432,11 +4436,8 @@ static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *m
 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
 		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
-			ring->has_high_prio = true;
 			mqd->cp_hqd_queue_priority =
 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
-		} else {
-			ring->has_high_prio = false;
 		}
 	}
 }
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2190,6 +2190,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 	int r;
 	unsigned irq_type;
 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+	unsigned int hw_prio;
 
 	ring = &adev->gfx.compute_ring[ring_id];
 
@@ -2208,10 +2209,11 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
 	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
 		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
 		+ ring->pipe;
 
+	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
 	/* type-2 packets are deprecated on MEC, use type-3 instead */
 	r = amdgpu_ring_init(adev, ring, 1024,
-			     &adev->gfx.eop_irq, irq_type);
+			     &adev->gfx.eop_irq, irq_type, hw_prio);
 	if (r)
 		return r;
 
@@ -2305,7 +2307,9 @@ static int gfx_v9_0_sw_init(void *handle)
 		ring->use_doorbell = true;
 		ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
 		r = amdgpu_ring_init(adev, ring, 1024,
-				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+				     &adev->gfx.eop_irq,
+				     AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 	}
@@ -3369,11 +3373,8 @@ static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *m
 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
 		if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
 			mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
-			ring->has_high_prio = true;
 			mqd->cp_hqd_queue_priority =
 				AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
-		} else {
-			ring->has_high_prio = false;
 		}
 	}
 }
drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -480,7 +480,8 @@ int jpeg_v1_0_sw_init(void *handle)
 
 	ring = &adev->jpeg.inst->ring_dec;
 	sprintf(ring->name, "jpeg_dec");
-	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
+			     0, AMDGPU_RING_PRIO_DEFAULT);
 	if (r)
 		return r;
 
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -106,7 +106,8 @@ static int jpeg_v2_0_sw_init(void *handle)
 	ring->use_doorbell = true;
 	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
 	sprintf(ring->name, "jpeg_dec");
-	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
+			     0, AMDGPU_RING_PRIO_DEFAULT);
 	if (r)
 		return r;
 
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -118,7 +118,8 @@ static int jpeg_v2_5_sw_init(void *handle)
 		ring->use_doorbell = true;
 		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
 		sprintf(ring->name, "jpeg_dec_%d", i);
-		r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, 0);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
+				     0, AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -873,7 +873,8 @@ static int sdma_v2_4_sw_init(void *handle)
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
 				     AMDGPU_SDMA_IRQ_INSTANCE0 :
-				     AMDGPU_SDMA_IRQ_INSTANCE1);
+				     AMDGPU_SDMA_IRQ_INSTANCE1,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 	}
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1157,7 +1157,8 @@ static int sdma_v3_0_sw_init(void *handle)
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
 				     AMDGPU_SDMA_IRQ_INSTANCE0 :
-				     AMDGPU_SDMA_IRQ_INSTANCE1);
+				     AMDGPU_SDMA_IRQ_INSTANCE1,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 	}
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1859,7 +1859,8 @@ static int sdma_v4_0_sw_init(void *handle)
 
 		sprintf(ring->name, "sdma%d", i);
 		r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
-				     AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+				     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 
@@ -1877,7 +1878,8 @@ static int sdma_v4_0_sw_init(void *handle)
 			sprintf(ring->name, "page%d", i);
 			r = amdgpu_ring_init(adev, ring, 1024,
 					     &adev->sdma.trap_irq,
-					     AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+					     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+					     AMDGPU_RING_PRIO_DEFAULT);
 			if (r)
 				return r;
 		}
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1272,7 +1272,8 @@ static int sdma_v5_0_sw_init(void *handle)
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
 				     AMDGPU_SDMA_IRQ_INSTANCE0 :
-				     AMDGPU_SDMA_IRQ_INSTANCE1);
+				     AMDGPU_SDMA_IRQ_INSTANCE1,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 	}
drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -504,7 +504,8 @@ static int si_dma_sw_init(void *handle)
 				     &adev->sdma.trap_irq,
 				     (i == 0) ?
 				     AMDGPU_SDMA_IRQ_INSTANCE0 :
-				     AMDGPU_SDMA_IRQ_INSTANCE1);
+				     AMDGPU_SDMA_IRQ_INSTANCE1,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 	}
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -118,7 +118,8 @@ static int uvd_v4_2_sw_init(void *handle)
 
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+			     AMDGPU_RING_PRIO_DEFAULT);
 	if (r)
 		return r;
 
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -116,7 +116,8 @@ static int uvd_v5_0_sw_init(void *handle)
 
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+			     AMDGPU_RING_PRIO_DEFAULT);
 	if (r)
 		return r;
 
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -418,7 +418,8 @@ static int uvd_v6_0_sw_init(void *handle)
 
 	ring = &adev->uvd.inst->ring;
 	sprintf(ring->name, "uvd");
-	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+			     AMDGPU_RING_PRIO_DEFAULT);
 	if (r)
 		return r;
 
@@ -430,7 +431,9 @@ static int uvd_v6_0_sw_init(void *handle)
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
 			ring = &adev->uvd.inst->ring_enc[i];
 			sprintf(ring->name, "uvd_enc%d", i);
-			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+			r = amdgpu_ring_init(adev, ring, 512,
+					     &adev->uvd.inst->irq, 0,
+					     AMDGPU_RING_PRIO_DEFAULT);
 			if (r)
 				return r;
 		}
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -452,7 +452,9 @@ static int uvd_v7_0_sw_init(void *handle)
 		if (!amdgpu_sriov_vf(adev)) {
 			ring = &adev->uvd.inst[j].ring;
 			sprintf(ring->name, "uvd_%d", ring->me);
-			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
+			r = amdgpu_ring_init(adev, ring, 512,
+					     &adev->uvd.inst[j].irq, 0,
+					     AMDGPU_RING_PRIO_DEFAULT);
 			if (r)
 				return r;
 		}
@@ -471,7 +473,9 @@ static int uvd_v7_0_sw_init(void *handle)
 			else
 				ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
 		}
-		r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
+		r = amdgpu_ring_init(adev, ring, 512,
+				     &adev->uvd.inst[j].irq, 0,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 	}
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -434,7 +434,8 @@ static int vce_v2_0_sw_init(void *handle)
 		ring = &adev->vce.ring[i];
 		sprintf(ring->name, "vce%d", i);
 		r = amdgpu_ring_init(adev, ring, 512,
-				     &adev->vce.irq, 0);
+				     &adev->vce.irq, 0,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 	}
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -442,7 +442,8 @@ static int vce_v3_0_sw_init(void *handle)
 	for (i = 0; i < adev->vce.num_rings; i++) {
 		ring = &adev->vce.ring[i];
 		sprintf(ring->name, "vce%d", i);
-		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 	}
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -476,7 +476,8 @@ static int vce_v4_0_sw_init(void *handle)
 			else
 				ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
 		}
-		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 	}
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -127,7 +127,8 @@ static int vcn_v1_0_sw_init(void *handle)
 
 	ring = &adev->vcn.inst->ring_dec;
 	sprintf(ring->name, "vcn_dec");
-	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+			     AMDGPU_RING_PRIO_DEFAULT);
 	if (r)
 		return r;
 
@@ -145,7 +146,8 @@ static int vcn_v1_0_sw_init(void *handle)
 	for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
 		ring = &adev->vcn.inst->ring_enc[i];
 		sprintf(ring->name, "vcn_enc%d", i);
-		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 	}
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -134,7 +134,8 @@ static int vcn_v2_0_sw_init(void *handle)
 	ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
 
 	sprintf(ring->name, "vcn_dec");
-	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+	r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+			     AMDGPU_RING_PRIO_DEFAULT);
 	if (r)
 		return r;
 
@@ -164,7 +165,8 @@ static int vcn_v2_0_sw_init(void *handle)
 		else
 			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
 		sprintf(ring->name, "vcn_enc%d", i);
-		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+				     AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 	}
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -193,7 +193,8 @@ static int vcn_v2_5_sw_init(void *handle)
 		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
 				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
 		sprintf(ring->name, "vcn_dec_%d", j);
-		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
+		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
+				     0, AMDGPU_RING_PRIO_DEFAULT);
 		if (r)
 			return r;
 
@@ -205,7 +206,9 @@ static int vcn_v2_5_sw_init(void *handle)
 					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
 
 			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
-			r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
+			r = amdgpu_ring_init(adev, ring, 512,
+					     &adev->vcn.inst[j].irq, 0,
+					     AMDGPU_RING_PRIO_DEFAULT);
 			if (r)
 				return r;
 		}