// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <drm/drm_fourcc.h>

#include "display/intel_display.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "pxp/intel_pxp.h"

#include "i915_drv.h"
#include "i915_gem_create.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

static u32 object_max_page_size(struct intel_memory_region **placements,
                                unsigned int n_placements)
{
        u32 max_page_size = 0;
        int i;

        for (i = 0; i < n_placements; i++) {
                struct intel_memory_region *mr = placements[i];

                GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
                max_page_size = max_t(u32, max_page_size, mr->min_page_size);
        }

        GEM_BUG_ON(!max_page_size);
        return max_page_size;
}
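
/*
 * Illustrative example (not part of the original source): the object size is
 * later rounded up to the value returned here, i.e. to the largest
 * min_page_size among the candidate regions. Assuming system memory uses a
 * 4KiB min_page_size and local memory 64KiB, a 70000-byte request placed in
 * both regions would be rounded up to 128KiB (two 64KiB pages) by
 * __i915_gem_object_create_user_ext().
 */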

static int object_set_placements(struct drm_i915_gem_object *obj,
                                 struct intel_memory_region **placements,
                                 unsigned int n_placements)
{
        struct intel_memory_region **arr;
        unsigned int i;

        GEM_BUG_ON(!n_placements);

        /*
         * For the common case of one memory region, skip storing an
         * allocated array and just point at the region directly.
         */
        if (n_placements == 1) {
                struct intel_memory_region *mr = placements[0];
                struct drm_i915_private *i915 = mr->i915;

                obj->mm.placements = &i915->mm.regions[mr->id];
                obj->mm.n_placements = 1;
        } else {
                arr = kmalloc_array(n_placements,
                                    sizeof(struct intel_memory_region *),
                                    GFP_KERNEL);
                if (!arr)
                        return -ENOMEM;

                for (i = 0; i < n_placements; i++)
                        arr[i] = placements[i];

                obj->mm.placements = arr;
                obj->mm.n_placements = n_placements;
        }

        return 0;
}

static int i915_gem_publish(struct drm_i915_gem_object *obj,
                            struct drm_file *file,
                            u64 *size_p,
                            u32 *handle_p)
{
        u64 size = obj->base.size;
        int ret;

        ret = drm_gem_handle_create(file, &obj->base, handle_p);
        /* drop reference from allocate - handle holds it now */
        i915_gem_object_put(obj);
        if (ret)
                return ret;

        *size_p = size;
        return 0;
}

static struct drm_i915_gem_object *
__i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size,
                                  struct intel_memory_region **placements,
                                  unsigned int n_placements,
                                  unsigned int ext_flags)
{
        struct intel_memory_region *mr = placements[0];
        struct drm_i915_gem_object *obj;
        unsigned int flags;
        int ret;

        i915_gem_flush_free_objects(i915);

        size = round_up(size, object_max_page_size(placements, n_placements));
        if (size == 0)
                return ERR_PTR(-EINVAL);

        /* For most of the ABI (e.g. mmap) we think in system pages */
        GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

        if (i915_gem_object_size_2big(size))
                return ERR_PTR(-E2BIG);

        obj = i915_gem_object_alloc();
        if (!obj)
                return ERR_PTR(-ENOMEM);

        ret = object_set_placements(obj, placements, n_placements);
        if (ret)
                goto object_free;

        /*
         * I915_BO_ALLOC_USER will make sure the object is cleared before
         * any user access.
         */
        flags = I915_BO_ALLOC_USER;

        ret = mr->ops->init_object(mr, obj, I915_BO_INVALID_OFFSET, size, 0, flags);
        if (ret)
                goto object_free;

        GEM_BUG_ON(size != obj->base.size);

        /* Add any flag set by create_ext options */
        obj->flags |= ext_flags;

        trace_i915_gem_object_create(obj);
        return obj;

object_free:
        if (obj->mm.n_placements > 1)
                kfree(obj->mm.placements);
        i915_gem_object_free(obj);
        return ERR_PTR(ret);
}

/**
 * __i915_gem_object_create_user - Creates a new object using the same path as
 * DRM_I915_GEM_CREATE_EXT
 * @i915: i915 private
 * @size: size of the buffer, in bytes
 * @placements: possible placement regions, in priority order
 * @n_placements: number of possible placement regions
 *
 * This function is exposed primarily for selftests and does very little
 * error checking. It is assumed that the set of placement regions has
 * already been verified to be valid.
 */
struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
                              struct intel_memory_region **placements,
                              unsigned int n_placements)
{
        return __i915_gem_object_create_user_ext(i915, size, placements,
                                                 n_placements, 0);
}
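
/*
 * Usage sketch (illustrative only, not part of the original file): an
 * in-kernel caller such as a selftest wanting a 2MiB system-memory object
 * might do something along these lines, assuming "i915" is a valid device
 * pointer:
 *
 *      struct intel_memory_region *mr =
 *              intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
 *      struct drm_i915_gem_object *obj =
 *              __i915_gem_object_create_user(i915, SZ_2M, &mr, 1);
 *
 *      if (IS_ERR(obj))
 *              return PTR_ERR(obj);
 */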

int
i915_gem_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        struct drm_i915_gem_object *obj;
        struct intel_memory_region *mr;
        enum intel_memory_type mem_type;
        int cpp = DIV_ROUND_UP(args->bpp, 8);
        u32 format;

        switch (cpp) {
        case 1:
                format = DRM_FORMAT_C8;
                break;
        case 2:
                format = DRM_FORMAT_RGB565;
                break;
        case 4:
                format = DRM_FORMAT_XRGB8888;
                break;
        default:
                return -EINVAL;
        }

        /* have to work out size/pitch and return them */
        args->pitch = ALIGN(args->width * cpp, 64);

        /* align stride to page size so that we can remap */
        if (args->pitch > intel_plane_fb_max_stride(dev, format,
                                                    DRM_FORMAT_MOD_LINEAR))
                args->pitch = ALIGN(args->pitch, 4096);

        if (args->pitch < args->width)
                return -EINVAL;

        args->size = mul_u32_u32(args->pitch, args->height);

        mem_type = INTEL_MEMORY_SYSTEM;
        if (HAS_LMEM(to_i915(dev)))
                mem_type = INTEL_MEMORY_LOCAL;

        mr = intel_memory_region_by_type(to_i915(dev), mem_type);

        obj = __i915_gem_object_create_user(to_i915(dev), args->size, &mr, 1);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        return i915_gem_publish(obj, file, &args->size, &args->handle);
}
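
/*
 * Worked example (illustrative only): for a 1920x1080 dumb buffer with
 * bpp = 32, cpp = 4, so pitch = ALIGN(1920 * 4, 64) = 7680 bytes and
 * size = 7680 * 1080 = 8294400 bytes, assuming the pitch stays within the
 * display's maximum FB stride. Any further page-size rounding happens later
 * in __i915_gem_object_create_user().
 */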

/**
 * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_create *args = data;
        struct drm_i915_gem_object *obj;
        struct intel_memory_region *mr;

        mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);

        obj = __i915_gem_object_create_user(i915, args->size, &mr, 1);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        return i915_gem_publish(obj, file, &args->size, &args->handle);
}
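
/*
 * Userspace sketch (illustrative only): the legacy create path takes just a
 * size and returns a handle, e.g.
 *
 *      struct drm_i915_gem_create create = { .size = 4096 };
 *
 *      if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *              use(create.handle);
 *
 * where "fd" is an open DRM device file descriptor and "use()" is a
 * hypothetical stand-in for whatever the caller does with the new handle.
 */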

struct create_ext {
        struct drm_i915_private *i915;
        struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
        unsigned int n_placements;
        unsigned int placement_mask;
        unsigned long flags;
        unsigned int pat_index;
};

static void repr_placements(char *buf, size_t size,
                            struct intel_memory_region **placements,
                            int n_placements)
{
        int i;

        buf[0] = '\0';

        for (i = 0; i < n_placements; i++) {
                struct intel_memory_region *mr = placements[i];
                int r;

                r = snprintf(buf, size, "\n  %s -> { class: %d, inst: %d }",
                             mr->name, mr->type, mr->instance);
                if (r >= size)
                        return;

                buf += r;
                size -= r;
        }
}

static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
                          struct create_ext *ext_data)
{
        struct drm_i915_private *i915 = ext_data->i915;
        struct drm_i915_gem_memory_class_instance __user *uregions =
                u64_to_user_ptr(args->regions);
        struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
        u32 mask;
        int i, ret = 0;

        if (args->pad) {
                drm_dbg(&i915->drm, "pad should be zero\n");
                ret = -EINVAL;
        }

        if (!args->num_regions) {
                drm_dbg(&i915->drm, "num_regions is zero\n");
                ret = -EINVAL;
        }

        BUILD_BUG_ON(ARRAY_SIZE(i915->mm.regions) != ARRAY_SIZE(placements));
        BUILD_BUG_ON(ARRAY_SIZE(ext_data->placements) != ARRAY_SIZE(placements));
        if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
                drm_dbg(&i915->drm, "num_regions is too large\n");
                ret = -EINVAL;
        }

        if (ret)
                return ret;

        mask = 0;
        for (i = 0; i < args->num_regions; i++) {
                struct drm_i915_gem_memory_class_instance region;
                struct intel_memory_region *mr;

                if (copy_from_user(&region, uregions, sizeof(region)))
                        return -EFAULT;

                mr = intel_memory_region_lookup(i915,
                                                region.memory_class,
                                                region.memory_instance);
                if (!mr || mr->private) {
                        drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n",
                                region.memory_class, region.memory_instance, i);
                        ret = -EINVAL;
                        goto out_dump;
                }

                if (mask & BIT(mr->id)) {
                        drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
                                mr->name, region.memory_class,
                                region.memory_instance, i);
                        ret = -EINVAL;
                        goto out_dump;
                }

                placements[i] = mr;
                mask |= BIT(mr->id);

                ++uregions;
        }

        if (ext_data->n_placements) {
                ret = -EINVAL;
                goto out_dump;
        }

        ext_data->n_placements = args->num_regions;
        for (i = 0; i < args->num_regions; i++)
                ext_data->placements[i] = placements[i];

        ext_data->placement_mask = mask;
        return 0;

out_dump:
        if (1) {
                char buf[256];

                if (ext_data->n_placements) {
                        repr_placements(buf,
                                        sizeof(buf),
                                        ext_data->placements,
                                        ext_data->n_placements);
                        drm_dbg(&i915->drm,
                                "Placements were already set in previous EXT. Existing placements: %s\n",
                                buf);
                }

                repr_placements(buf, sizeof(buf), placements, i);
                drm_dbg(&i915->drm, "New placements(so far validated): %s\n", buf);
        }

        return ret;
}

static int ext_set_placements(struct i915_user_extension __user *base,
                              void *data)
{
        struct drm_i915_gem_create_ext_memory_regions ext;

        if (copy_from_user(&ext, base, sizeof(ext)))
                return -EFAULT;

        return set_placements(&ext, data);
}
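
/*
 * Userspace sketch (illustrative only, not part of the original file): the
 * memory-regions extension parsed by ext_set_placements() would be filled in
 * roughly like this, assuming a discrete part where device-local memory is
 * preferred over system memory:
 *
 *      struct drm_i915_gem_memory_class_instance regions[] = {
 *              { .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
 *              { .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
 *      };
 *      struct drm_i915_gem_create_ext_memory_regions ext = {
 *              .base.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
 *              .num_regions = 2,
 *              .regions = (uintptr_t)regions,
 *      };
 */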

static int ext_set_protected(struct i915_user_extension __user *base, void *data)
{
        struct drm_i915_gem_create_ext_protected_content ext;
        struct create_ext *ext_data = data;

        if (copy_from_user(&ext, base, sizeof(ext)))
                return -EFAULT;

        if (ext.flags)
                return -EINVAL;

        if (!intel_pxp_is_enabled(ext_data->i915->pxp))
                return -ENODEV;

        ext_data->flags |= I915_BO_PROTECTED;

        return 0;
}

static int ext_set_pat(struct i915_user_extension __user *base, void *data)
{
        struct create_ext *ext_data = data;
        struct drm_i915_private *i915 = ext_data->i915;
        struct drm_i915_gem_create_ext_set_pat ext;
        unsigned int max_pat_index;

        BUILD_BUG_ON(sizeof(struct drm_i915_gem_create_ext_set_pat) !=
                     offsetofend(struct drm_i915_gem_create_ext_set_pat, rsvd));

        /* Limiting the extension only to Xe_LPG and beyond */
        if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 70))
                return -ENODEV;

        if (copy_from_user(&ext, base, sizeof(ext)))
                return -EFAULT;

        max_pat_index = INTEL_INFO(i915)->max_pat_index;

        if (ext.pat_index > max_pat_index) {
                drm_dbg(&i915->drm, "PAT index is invalid: %u\n",
                        ext.pat_index);
                return -EINVAL;
        }

        ext_data->pat_index = ext.pat_index;

        return 0;
}
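
/*
 * Userspace sketch (illustrative only): a caller selecting a specific PAT
 * index would chain this extension into the create_ext extension list,
 * assuming the chosen index (here 3) is valid for the platform:
 *
 *      struct drm_i915_gem_create_ext_set_pat set_pat = {
 *              .base.name = I915_GEM_CREATE_EXT_SET_PAT,
 *              .pat_index = 3,
 *      };
 */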

static const i915_user_extension_fn create_extensions[] = {
        [I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
        [I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected,
        [I915_GEM_CREATE_EXT_SET_PAT] = ext_set_pat,
};

#define PAT_INDEX_NOT_SET	0xffff
/**
 * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_create_ext *args = data;
        struct create_ext ext_data = { .i915 = i915 };
        struct drm_i915_gem_object *obj;
        int ret;

        if (args->flags & ~I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS)
                return -EINVAL;

        ext_data.pat_index = PAT_INDEX_NOT_SET;
        ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
                                   create_extensions,
                                   ARRAY_SIZE(create_extensions),
                                   &ext_data);
        if (ret)
                return ret;

        if (!ext_data.n_placements) {
                ext_data.placements[0] =
                        intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
                ext_data.n_placements = 1;
        }

        if (args->flags & I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) {
                if (ext_data.n_placements == 1)
                        return -EINVAL;

                /*
                 * We always need to be able to spill to system memory, if we
                 * can't place in the mappable part of LMEM.
                 */
                if (!(ext_data.placement_mask & BIT(INTEL_REGION_SMEM)))
                        return -EINVAL;
        } else {
                if (ext_data.n_placements > 1 ||
                    ext_data.placements[0]->type != INTEL_MEMORY_SYSTEM)
                        ext_data.flags |= I915_BO_ALLOC_GPU_ONLY;
        }

        obj = __i915_gem_object_create_user_ext(i915, args->size,
                                                ext_data.placements,
                                                ext_data.n_placements,
                                                ext_data.flags);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        if (ext_data.pat_index != PAT_INDEX_NOT_SET) {
                i915_gem_object_set_pat_index(obj, ext_data.pat_index);
                /* Mark pat_index is set by UMD */
                obj->pat_set_by_user = true;
        }

        return i915_gem_publish(obj, file, &args->size, &args->handle);
}
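
/*
 * End-to-end userspace sketch (illustrative only, not part of the original
 * file): a DRM_IOCTL_I915_GEM_CREATE_EXT call chaining the memory-regions
 * and set-pat extensions might look like this, assuming the "ext" and
 * "set_pat" sketches above and an open DRM fd; "use()" is hypothetical:
 *
 *      struct drm_i915_gem_create_ext create = {
 *              .size = 2 * 1024 * 1024,
 *              .extensions = (uintptr_t)&ext,
 *      };
 *
 *      ext.base.next_extension = (uintptr_t)&set_pat;
 *
 *      if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create) == 0)
 *              use(create.handle);
 */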