2019-02-12 22:28:13 +10:00
|
|
|
#ifndef __NVKM_GSP_H__
|
|
|
|
#define __NVKM_GSP_H__
|
|
|
|
#define nvkm_gsp(p) container_of((p), struct nvkm_gsp, subdev)
|
|
|
|
#include <core/subdev.h>
|
2020-01-15 06:34:21 +10:00
|
|
|
#include <core/falcon.h>
|
2023-09-19 06:21:37 +10:00
|
|
|
#include <core/firmware.h>
|
|
|
|
|
drm/nouveau: expose GSP-RM logging buffers via debugfs
The LOGINIT, LOGINTR, LOGRM, and LOGPMU buffers are circular buffers
that have printf-like logs from GSP-RM and PMU encoded in them.
LOGINIT, LOGINTR, and LOGRM are allocated by Nouveau and their DMA
addresses are passed to GSP-RM during initialization. The buffers are
required for GSP-RM to initialize properly.
LOGPMU is also allocated by Nouveau, but its contents are updated
when Nouveau receives an NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT RPC from
GSP-RM. Nouveau then copies the RPC to the buffer.
The messages are encoded as an array of variable-length structures that
contain the parameters to an NV_PRINTF call. The format string and
parameter count are stored in a special ELF image that contains only
logging strings. This image is not currently shipped with the Nvidia
driver.
There are two methods to extract the logs.
OpenRM tries to load the logging ELF, and if present, parses the log
buffers in real time and outputs the strings to the kernel console.
Alternatively, and this is the method used by this patch, the buffers
can be exposed to user space, and a user-space tool (along with the
logging ELF image) can parse the buffer and dump the logs.
This method has the advantage that it allows the buffers to be parsed
even when the logging ELF file is not available to the user. However,
it has the disadvantage that the debugfs entries need to remain until the
driver is unloaded.
The buffers are exposed via debugfs. If GSP-RM fails to initialize, then
Nouveau immediately shuts down the GSP interface. This would normally
also deallocate the logging buffers, thereby preventing the user from
capturing the debug logs.
To avoid this, introduce the keep-gsp-logging command line parameter. If
specified, and if at least one logging buffer has content, then Nouveau
will migrate these buffers into new debugfs entries that are retained
until the driver unloads.
An end-user can capture the logs using the following commands:
cp /sys/kernel/debug/nouveau/<path>/loginit loginit
cp /sys/kernel/debug/nouveau/<path>/logrm logrm
cp /sys/kernel/debug/nouveau/<path>/logintr logintr
cp /sys/kernel/debug/nouveau/<path>/logpmu logpmu
where (for a PCI device) <path> is the PCI ID of the GPU (e.g.
0000:65:00.0).
Since LOGPMU is not needed for normal GSP-RM operation, it is only
created if debugfs is available. Otherwise, the
NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT RPCs are ignored.
A simple way to test the buffer migration feature is to have
nvkm_gsp_init() return an error code.
Tested-by: Ben Skeggs <bskeggs@nvidia.com>
Signed-off-by: Timur Tabi <ttabi@nvidia.com>
Signed-off-by: Danilo Krummrich <dakr@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20241030202952.694055-2-ttabi@nvidia.com
2024-10-30 15:29:52 -05:00
|
|
|
#include <linux/debugfs.h>
|
|
|
|
|
2023-09-19 06:21:37 +10:00
|
|
|
#define GSP_PAGE_SHIFT 12
|
|
|
|
#define GSP_PAGE_SIZE BIT(GSP_PAGE_SHIFT)
|
|
|
|
|
|
|
|
/*
 * A DMA-coherent buffer shared with GSP-RM: CPU view in @data, device
 * view in @addr.  Allocated/freed via nvkm_gsp_mem_ctor()/_dtor().
 */
struct nvkm_gsp_mem {
	struct device *dev;	/* device that owns the DMA mapping */
	size_t size;		/* allocation size in bytes */
	void *data;		/* kernel virtual address of the buffer */
	dma_addr_t addr;	/* DMA (bus) address handed to GSP-RM */
};
|
|
|
|
|
2024-11-14 13:02:37 +10:00
|
|
|
int nvkm_gsp_mem_ctor(struct nvkm_gsp *, size_t size, struct nvkm_gsp_mem *);
|
|
|
|
void nvkm_gsp_mem_dtor(struct nvkm_gsp_mem *);
|
|
|
|
|
2023-09-19 06:21:37 +10:00
|
|
|
/*
 * Three-level "radix3" page table describing a buffer for GSP-RM.
 * Levels 0 and 1 each occupy a single coherent page; level 2 may span
 * many pages and is built from a scatterlist so it does not require
 * physically contiguous memory on fragmented systems.
 */
struct nvkm_gsp_radix3 {
	struct nvkm_gsp_mem lvl0;	/* level 0: single coherent page */
	struct nvkm_gsp_mem lvl1;	/* level 1: single coherent page */
	struct sg_table lvl2;		/* level 2: sg-allocated leaf pages */
};
|
|
|
|
|
|
|
|
int nvkm_gsp_sg(struct nvkm_device *, u64 size, struct sg_table *);
|
|
|
|
void nvkm_gsp_sg_free(struct nvkm_device *, struct sg_table *);
|
|
|
|
|
|
|
|
typedef int (*nvkm_gsp_msg_ntfy_func)(void *priv, u32 fn, void *repv, u32 repc);
|
2019-02-12 22:28:13 +10:00
|
|
|
|
2023-09-19 06:21:42 +10:00
|
|
|
struct nvkm_gsp_event;
|
|
|
|
typedef void (*nvkm_gsp_event_func)(struct nvkm_gsp_event *, void *repv, u32 repc);
|
|
|
|
|
2025-02-27 01:35:53 +00:00
|
|
|
/**
 * DOC: GSP message handling policy
 *
 * When sending a GSP RPC command, there can be multiple cases of handling
 * the GSP RPC messages, which are the reply of GSP RPC commands, according
 * to the requirement of the callers and the nature of the GSP RPC commands.
 *
 * NVKM_GSP_RPC_REPLY_NOWAIT - If specified, immediately return to the
 * caller after the GSP RPC command is issued.
 *
 * NVKM_GSP_RPC_REPLY_RECV - If specified, wait and receive the entire GSP
 * RPC message after the GSP RPC command is issued.
 *
 * NVKM_GSP_RPC_REPLY_POLL - If specified, wait for the specific reply and
 * discard the reply before returning to the caller.
 */
enum nvkm_gsp_rpc_reply_policy {
	NVKM_GSP_RPC_REPLY_NOWAIT = 0,
	NVKM_GSP_RPC_REPLY_RECV,
	NVKM_GSP_RPC_REPLY_POLL,
};
|
|
|
|
|
2019-02-12 22:28:13 +10:00
|
|
|
/*
 * Per-device state for the GSP subdev, which hosts NVIDIA's RM firmware
 * (GSP-RM) on the GPU and the command/message queue machinery used to
 * drive it from the host.
 */
struct nvkm_gsp {
	const struct nvkm_gsp_func *func;	/* chip-specific hooks */
	struct nvkm_subdev subdev;

	struct nvkm_falcon falcon;	/* processor the GSP firmware runs on */

	/* Firmware images as loaded from the filesystem. */
	struct {
		struct {
			const struct firmware *load;
			const struct firmware *unload;
		} booter;	/* booter load/unload images */

		const struct firmware *fmc;	/* NOTE(review): used on newer chips (gh100+ paths) — confirm semantics */

		const struct firmware *bl;	/* bootloader image */
		const struct firmware *rm;	/* GSP-RM image itself */
	} fws;

	struct nvkm_firmware fw;	/* GSP-RM image prepared for the GPU */
	struct nvkm_gsp_mem sig;	/* firmware signature blob */
	struct nvkm_gsp_radix3 radix3;	/* radix3 page table describing fw */

	/* Framebuffer (VRAM) layout around/for GSP-RM. */
	struct {
		struct {
			struct {
				u64 addr;
				u64 size;
			} vga_workspace;
			u64 addr;
			u64 size;
		} bios;
		struct {
			struct {
				u64 addr;
				u64 size;
			} frts, boot, elf, heap;
			u64 addr;
			u64 size;
		} wpr2;		/* write-protected region 2 carve-out */
		struct {
			u64 addr;
			u64 size;
		} heap;
		u64 addr;
		u64 size;

		struct {
			u64 addr;
			u64 size;
		} region[16];	/* FB regions */
		int region_nr;	/* number of valid entries in region[] */
		u32 rsvd_size;
	} fb;

	/* Parsed booter falcon firmware. */
	struct {
		struct nvkm_falcon_fw load;
		struct nvkm_falcon_fw unload;
	} booter;

	/* FMC image plus its metadata pointers (into the loaded image). */
	struct {
		struct nvkm_gsp_mem fw;
		u8 *hash;
		u8 *pkey;
		u8 *sig;

		struct nvkm_gsp_mem args;
	} fmc;

	/* Boot image layout parsed from the bootloader firmware. */
	struct {
		struct nvkm_gsp_mem fw;
		u32 code_offset;
		u32 data_offset;
		u32 manifest_offset;
		u32 app_version;
	} boot;

	struct nvkm_gsp_mem libos;	/* LIBOS arguments */
	struct nvkm_gsp_mem loginit;	/* GSP-RM init log buffer */
	struct nvkm_gsp_mem logintr;	/* GSP-RM interrupt log buffer */
	struct nvkm_gsp_mem logrm;	/* GSP-RM runtime log buffer */
	struct nvkm_gsp_mem rmargs;	/* GSP-RM argument block */

	struct nvkm_gsp_mem wpr_meta;	/* WPR metadata passed to GSP-RM */

	/* State preserved across suspend/resume. */
	struct {
		struct sg_table sgt;
		struct nvkm_gsp_radix3 radix3;
		struct nvkm_gsp_mem meta;
		struct sg_table fbsr;
	} sr;

	/* Memory shared with GSP-RM: PTEs plus the cmd/msg queue backing. */
	struct {
		struct nvkm_gsp_mem mem;

		struct {
			int nr;
			u32 size;
			u64 *ptr;
		} ptes;

		struct {
			u32 size;
			void *ptr;
		} cmdq, msgq;
	} shm;

	/* Command queue (host -> GSP-RM). */
	struct nvkm_gsp_cmdq {
		struct mutex mutex;	/* serialises command submission */
		u32 cnt;
		u32 seq;
		u32 *wptr;
		u32 *rptr;
	} cmdq;

	/* Message queue (GSP-RM -> host). */
	struct nvkm_gsp_msgq {
		struct mutex mutex;	/* serialises message reception */
		u32 cnt;
		u32 *wptr;
		u32 *rptr;
		/* Registered handlers for messages, keyed by function id. */
		struct nvkm_gsp_msgq_ntfy {
			u32 fn;
			nvkm_gsp_msg_ntfy_func func;
			void *priv;
		} ntfy[16];
		int ntfy_nr;
		struct work_struct work;	/* deferred message processing */
	} msgq;

	bool running;	/* presumably set while GSP-RM is up — confirm against r535 code */

	/* Internal GSP-RM control handles. */
	struct {
		struct nvkm_gsp_client {
			struct nvkm_gsp_object {
				struct nvkm_gsp_client *client;
				struct nvkm_gsp_object *parent;
				u32 handle;
			} object;

			struct nvkm_gsp *gsp;

			struct list_head events;	/* nvkm_gsp_event.head entries */
		} client;

		struct nvkm_gsp_device {
			struct nvkm_gsp_object object;
			struct nvkm_gsp_object subdevice;
		} device;
	} internal;

	/* Interrupt routing information, per subdev. */
	struct {
		enum nvkm_subdev_type type;
		int inst;
		u32 stall;
		u32 nonstall;
	} intr[32];
	int intr_nr;	/* number of valid entries in intr[] */

	/* RM-owned BAR page directory bases. */
	struct {
		u64 rm_bar1_pdb;
		u64 rm_bar2_pdb;
	} bar;

	/* GR topology counts. */
	struct {
		u8 gpcs;
		u8 tpcs;
	} gr;

	struct nvkm_rm *rm;	/* version-specific RM API (see rm/rm.h) */

	/* Allocator for GSP-RM client handles. */
	struct {
		struct mutex mutex;	/* protects idr */
		struct idr idr;
	} client_id;

	/* A linked list of registry items. The registry RPC will be built from it. */
	struct list_head registry_list;

	/* The size of the registry RPC. */
	size_t registry_rpc_size;

#ifdef CONFIG_DEBUG_FS
	/*
	 * Logging buffers in debugfs. The wrapper objects need to remain
	 * in memory until the dentry is deleted.
	 */
	struct {
		struct dentry *parent;
		struct dentry *init;
		struct dentry *rm;
		struct dentry *intr;
		struct dentry *pmu;
	} debugfs;

	struct debugfs_blob_wrapper blob_init;
	struct debugfs_blob_wrapper blob_intr;
	struct debugfs_blob_wrapper blob_rm;
	struct debugfs_blob_wrapper blob_pmu;
#endif
};
|
2019-02-12 22:28:13 +10:00
|
|
|
|
2023-09-19 06:21:09 +10:00
|
|
|
static inline bool
|
|
|
|
nvkm_gsp_rm(struct nvkm_gsp *gsp)
|
|
|
|
{
|
2023-09-19 06:21:37 +10:00
|
|
|
return gsp && (gsp->fws.rm || gsp->fw.img);
|
|
|
|
}
|
|
|
|
|
2024-11-14 13:02:36 +10:00
|
|
|
#include <rm/rm.h>
|
|
|
|
|
2023-09-19 06:21:37 +10:00
|
|
|
/*
 * Allocate/obtain an RPC argument buffer for function @fn with @argc
 * bytes of argument space, via the version-specific RM RPC backend.
 * Returns the buffer, or an ERR_PTR/NULL on failure (callers check with
 * IS_ERR_OR_NULL(), see nvkm_gsp_rpc_rd() below).
 */
static inline void *
nvkm_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
{
	return gsp->rm->api->rpc->get(gsp, fn, argc);
}
|
|
|
|
|
|
|
|
/*
 * Send the RPC command in @argv (built by nvkm_gsp_rpc_get()) to GSP-RM.
 * @policy selects how the reply is handled (see "GSP message handling
 * policy" above); @repc is the reply payload size as used by the callers
 * below (0 when no reply payload is wanted).  Dispatches to the
 * version-specific RM RPC backend.
 */
static inline void *
nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv,
		  enum nvkm_gsp_rpc_reply_policy policy, u32 repc)
{
	return gsp->rm->api->rpc->push(gsp, argv, policy, repc);
}
|
|
|
|
|
|
|
|
static inline void *
|
|
|
|
nvkm_gsp_rpc_rd(struct nvkm_gsp *gsp, u32 fn, u32 argc)
|
|
|
|
{
|
|
|
|
void *argv = nvkm_gsp_rpc_get(gsp, fn, argc);
|
|
|
|
|
|
|
|
if (IS_ERR_OR_NULL(argv))
|
|
|
|
return argv;
|
|
|
|
|
2025-02-27 01:35:53 +00:00
|
|
|
return nvkm_gsp_rpc_push(gsp, argv, NVKM_GSP_RPC_REPLY_RECV, argc);
|
2023-09-19 06:21:37 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
2025-02-27 01:35:53 +00:00
|
|
|
nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv,
|
|
|
|
enum nvkm_gsp_rpc_reply_policy policy)
|
2023-09-19 06:21:37 +10:00
|
|
|
{
|
2025-02-27 01:35:53 +00:00
|
|
|
void *repv = nvkm_gsp_rpc_push(gsp, argv, policy, 0);
|
2023-09-19 06:21:37 +10:00
|
|
|
|
|
|
|
if (IS_ERR(repv))
|
|
|
|
return PTR_ERR(repv);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Release the RPC buffer @repv obtained from nvkm_gsp_rpc_get()/_rd(),
 * via the version-specific RM RPC backend.
 */
static inline void
nvkm_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
{
	gsp->rm->api->rpc->done(gsp, repv);
}
|
|
|
|
|
2023-09-19 06:21:38 +10:00
|
|
|
/*
 * Allocate a control-call parameter buffer of @argc bytes for command
 * @cmd on @object, via the version-specific RM control backend.
 */
static inline void *
nvkm_gsp_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
{
	return object->client->gsp->rm->api->ctrl->get(object, cmd, argc);
}
|
|
|
|
|
2023-12-22 14:31:57 +10:00
|
|
|
/*
 * Issue the control call whose parameters are in *@argv (built by
 * nvkm_gsp_rm_ctrl_get()).  @argv is passed by reference so the backend
 * can replace it with the reply buffer; @repc is the expected reply size
 * as used by nvkm_gsp_rm_ctrl_rd().  Returns 0 or a negative errno.
 */
static inline int
nvkm_gsp_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
{
	return object->client->gsp->rm->api->ctrl->push(object, argv, repc);
}
|
|
|
|
|
|
|
|
static inline void *
|
|
|
|
nvkm_gsp_rm_ctrl_rd(struct nvkm_gsp_object *object, u32 cmd, u32 repc)
|
|
|
|
{
|
|
|
|
void *argv = nvkm_gsp_rm_ctrl_get(object, cmd, repc);
|
2023-12-22 14:31:57 +10:00
|
|
|
int ret;
|
2023-09-19 06:21:38 +10:00
|
|
|
|
|
|
|
if (IS_ERR(argv))
|
|
|
|
return argv;
|
|
|
|
|
2023-12-22 14:31:57 +10:00
|
|
|
ret = nvkm_gsp_rm_ctrl_push(object, &argv, repc);
|
|
|
|
if (ret)
|
|
|
|
return ERR_PTR(ret);
|
|
|
|
return argv;
|
2023-09-19 06:21:38 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Issue the control call in @argv without requesting a reply payload
 * (repc == 0).  Returns 0 on success or a negative errno.
 */
static inline int
nvkm_gsp_rm_ctrl_wr(struct nvkm_gsp_object *object, void *argv)
{
	return nvkm_gsp_rm_ctrl_push(object, &argv, 0);
}
|
|
|
|
|
|
|
|
/*
 * Release the control reply buffer @repv returned by
 * nvkm_gsp_rm_ctrl_rd()/_push(), via the RM control backend.
 */
static inline void
nvkm_gsp_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
{
	object->client->gsp->rm->api->ctrl->done(object, repv);
}
|
|
|
|
|
2023-09-19 06:21:39 +10:00
|
|
|
static inline void *
|
|
|
|
nvkm_gsp_rm_alloc_get(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u32 argc,
|
|
|
|
struct nvkm_gsp_object *object)
|
|
|
|
{
|
|
|
|
struct nvkm_gsp_client *client = parent->client;
|
|
|
|
struct nvkm_gsp *gsp = client->gsp;
|
|
|
|
void *argv;
|
|
|
|
|
|
|
|
object->client = parent->client;
|
|
|
|
object->parent = parent;
|
|
|
|
object->handle = handle;
|
|
|
|
|
2024-11-14 13:02:36 +10:00
|
|
|
argv = gsp->rm->api->alloc->get(object, oclass, argc);
|
2023-09-19 06:21:39 +10:00
|
|
|
if (IS_ERR_OR_NULL(argv)) {
|
|
|
|
object->client = NULL;
|
|
|
|
return argv;
|
|
|
|
}
|
|
|
|
|
|
|
|
return argv;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void *
|
2025-01-24 10:29:47 -08:00
|
|
|
nvkm_gsp_rm_alloc_push(struct nvkm_gsp_object *object, void *argv)
|
2023-09-19 06:21:39 +10:00
|
|
|
{
|
2024-11-14 13:02:36 +10:00
|
|
|
void *repv = object->client->gsp->rm->api->alloc->push(object, argv);
|
2023-09-19 06:21:39 +10:00
|
|
|
|
|
|
|
if (IS_ERR(repv))
|
|
|
|
object->client = NULL;
|
|
|
|
|
|
|
|
return repv;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Issue the allocation in @argv and convert the result to an errno:
 * 0 on success, PTR_ERR() value on failure.
 */
static inline int
nvkm_gsp_rm_alloc_wr(struct nvkm_gsp_object *object, void *argv)
{
	void *reply = nvkm_gsp_rm_alloc_push(object, argv);

	return IS_ERR(reply) ? PTR_ERR(reply) : 0;
}
|
|
|
|
|
|
|
|
/*
 * Release the allocation reply buffer @repv returned by
 * nvkm_gsp_rm_alloc_push(), via the RM allocation backend.
 */
static inline void
nvkm_gsp_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
{
	object->client->gsp->rm->api->alloc->done(object, repv);
}
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
nvkm_gsp_rm_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u32 argc,
|
|
|
|
struct nvkm_gsp_object *object)
|
|
|
|
{
|
|
|
|
void *argv = nvkm_gsp_rm_alloc_get(parent, handle, oclass, argc, object);
|
|
|
|
|
|
|
|
if (IS_ERR_OR_NULL(argv))
|
|
|
|
return argv ? PTR_ERR(argv) : -EIO;
|
|
|
|
|
|
|
|
return nvkm_gsp_rm_alloc_wr(object, argv);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
nvkm_gsp_rm_free(struct nvkm_gsp_object *object)
|
|
|
|
{
|
2024-11-14 13:02:36 +10:00
|
|
|
if (object->client) {
|
|
|
|
int ret = object->client->gsp->rm->api->alloc->free(object);
|
|
|
|
object->client = NULL;
|
|
|
|
return ret;
|
|
|
|
}
|
2023-09-19 06:21:39 +10:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2025-05-14 09:19:56 +10:00
|
|
|
int nvkm_gsp_client_ctor(struct nvkm_gsp *, struct nvkm_gsp_client *);
|
|
|
|
void nvkm_gsp_client_dtor(struct nvkm_gsp_client *);
|
2023-09-19 06:21:39 +10:00
|
|
|
|
|
|
|
/*
 * Construct a GSP-RM device object for @client, via the version-specific
 * RM device backend.  Returns 0 or a negative errno.
 */
static inline int
nvkm_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
{
	return client->gsp->rm->api->device->ctor(client, device);
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
nvkm_gsp_device_dtor(struct nvkm_gsp_device *device)
|
|
|
|
{
|
|
|
|
if (device->object.client)
|
2024-11-14 13:02:37 +10:00
|
|
|
device->object.client->gsp->rm->api->device->dtor(device);
|
2023-09-19 06:21:39 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Construct a client and its device object in one step.  On device
 * construction failure the freshly-created client is torn down again,
 * so the caller never sees a half-initialised pair.
 */
static inline int
nvkm_gsp_client_device_ctor(struct nvkm_gsp *gsp,
			    struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
{
	int ret = nvkm_gsp_client_ctor(gsp, client);

	if (ret)
		return ret;

	ret = nvkm_gsp_device_ctor(client, device);
	if (ret)
		nvkm_gsp_client_dtor(client);

	return ret;
}
|
|
|
|
|
2023-09-19 06:21:42 +10:00
|
|
|
/*
 * An event subscription on a GSP-RM device: @func is invoked with the
 * event payload when the event fires.
 */
struct nvkm_gsp_event {
	struct nvkm_gsp_device *device;	/* owning device; may be NULL (checked by nvkm_gsp_event_dtor()) */
	u32 id;				/* event identifier */
	nvkm_gsp_event_func func;	/* callback: (event, repv, repc) */

	struct nvkm_gsp_object object;	/* RM object backing the subscription */

	struct list_head head;		/* presumably linked into nvkm_gsp_client.events — confirm */
};
|
|
|
|
|
|
|
|
/*
 * Register an event subscription @event for event @id on @device with
 * handle @handle, invoking @func on delivery.  Dispatches to the
 * version-specific RM device backend.  Returns 0 or a negative errno.
 */
static inline int
nvkm_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
			   nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
{
	struct nvkm_rm *rm = device->object.client->gsp->rm;

	return rm->api->device->event.ctor(device, handle, id, func, event);
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
nvkm_gsp_event_dtor(struct nvkm_gsp_event *event)
|
|
|
|
{
|
|
|
|
struct nvkm_gsp_device *device = event->device;
|
|
|
|
|
|
|
|
if (device)
|
2024-11-14 13:02:37 +10:00
|
|
|
device->object.client->gsp->rm->api->device->event.dtor(event);
|
2023-09-19 06:21:42 +10:00
|
|
|
}
|
|
|
|
|
2023-09-19 06:21:40 +10:00
|
|
|
int nvkm_gsp_intr_stall(struct nvkm_gsp *, enum nvkm_subdev_type, int);
|
|
|
|
int nvkm_gsp_intr_nonstall(struct nvkm_gsp *, enum nvkm_subdev_type, int);
|
|
|
|
|
2020-12-04 11:23:38 +10:00
|
|
|
int gv100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
|
2023-09-19 06:21:09 +10:00
|
|
|
int tu102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
|
|
|
|
int tu116_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
|
|
|
|
int ga100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
|
2022-06-01 20:48:33 +10:00
|
|
|
int ga102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
|
2024-11-25 10:21:18 +10:00
|
|
|
int gh100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
|
2023-09-19 06:21:37 +10:00
|
|
|
int ad102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
|
2024-11-25 10:27:02 +10:00
|
|
|
int gb100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
|
2025-02-04 08:54:57 +10:00
|
|
|
int gb202_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
|
2019-02-12 22:28:13 +10:00
|
|
|
#endif
|