/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "disp.h"
#include "atom.h"
#include "core.h"
#include "head.h"
#include "wndw.h"
#include "handles.h"

#include <linux/dma-mapping.h>
#include <linux/hdmi.h>
#include <linux/component.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_scdc_helper.h>
#include <drm/drm_vblank.h>

#include <nvif/push507c.h>

#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/cl5070.h>
#include <nvif/cl507d.h>
#include <nvif/event.h>
#include <nvif/timer.h>

#include <nvhw/class/cl507c.h>
#include <nvhw/class/cl507d.h>
#include <nvhw/class/cl837d.h>
#include <nvhw/class/cl887d.h>
#include <nvhw/class/cl907d.h>
#include <nvhw/class/cl917d.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_fence.h"
#include "nouveau_fbcon.h"

#include <subdev/bios/dp.h>

/******************************************************************************
 * EVO channel
 *****************************************************************************/

static int
nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size,
		 struct nv50_chan *chan)
{
	struct nvif_sclass *sclass;
	int ret, i, n;

	chan->device = device;

	ret = n = nvif_object_sclass_get(disp, &sclass);
	if (ret < 0)
		return ret;

	while (oclass[0]) {
		for (i = 0; i < n; i++) {
			if (sclass[i].oclass == oclass[0]) {
				ret = nvif_object_ctor(disp, "kmsChan", 0,
						       oclass[0], data, size,
						       &chan->user);
				if (ret == 0)
					nvif_object_map(&chan->user, NULL, 0);
				nvif_object_sclass_put(&sclass);
				return ret;
			}
		}
		oclass++;
	}

	nvif_object_sclass_put(&sclass);
	return -ENOSYS;
}

static void
nv50_chan_destroy(struct nv50_chan *chan)
{
	nvif_object_dtor(&chan->user);
}

/******************************************************************************
 * DMA EVO channel
 *****************************************************************************/

void
nv50_dmac_destroy(struct nv50_dmac *dmac)
{
	nvif_object_dtor(&dmac->vram);
	nvif_object_dtor(&dmac->sync);

	nv50_chan_destroy(&dmac->base);

	nvif_mem_dtor(&dmac->_push.mem);
}
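
/* Push-buffer bookkeeping for EVO/NvDisplay DMA channels: nv50_dmac_kick()
 * publishes buffered commands by advancing PUT, nv50_dmac_free() reports how
 * many dwords may be written before catching up with GET, and
 * nv50_dmac_wind() wraps the write cursor back to the start of the buffer.
 */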
static void
nv50_dmac_kick(struct nvif_push *push)
{
	struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);

	dmac->cur = push->cur - (u32 *)dmac->_push.mem.object.map.ptr;
	if (dmac->put != dmac->cur) {
		/* Push buffer fetches are not coherent with BAR1, we need to ensure
		 * writes have been flushed right through to VRAM before writing PUT.
		 */
		if (dmac->push->mem.type & NVIF_MEM_VRAM) {
			struct nvif_device *device = dmac->base.device;
			nvif_wr32(&device->object, 0x070000, 0x00000001);
			nvif_msec(device, 2000,
				if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
					break;
			);
		}

		NVIF_WV32(&dmac->base.user, NV507C, PUT, PTR, dmac->cur);
		dmac->put = dmac->cur;
	}

	push->bgn = push->cur;
}

static int
nv50_dmac_free(struct nv50_dmac *dmac)
{
	u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
	if (get > dmac->cur) /* NVIDIA stay 5 away from GET, do the same. */
		return get - dmac->cur - 5;
	return dmac->max - dmac->cur;
}

static int
nv50_dmac_wind(struct nv50_dmac *dmac)
{
	/* Wait for GET to depart from the beginning of the push buffer to
	 * prevent writing PUT == GET, which would be ignored by HW.
	 */
	u32 get = NVIF_RV32(&dmac->base.user, NV507C, GET, PTR);
	if (get == 0) {
		/* Corner-case, HW idle, but non-committed work pending. */
		if (dmac->put == 0)
			nv50_dmac_kick(dmac->push);

		if (nvif_msec(dmac->base.device, 2000,
			if (NVIF_TV32(&dmac->base.user, NV507C, GET, PTR, >, 0))
				break;
		) < 0)
			return -ETIMEDOUT;
	}

	PUSH_RSVD(dmac->push, PUSH_JUMP(dmac->push, 0));
	dmac->cur = 0;
	return 0;
}
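
/* Ensure at least 'size' dwords of push buffer space are available, wrapping
 * back to the start when the cursor would run off the end and waiting for
 * the hardware to consume enough of the buffer before returning.
 */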
static int
nv50_dmac_wait(struct nvif_push *push, u32 size)
{
	struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
	int free;

	if (WARN_ON(size > dmac->max))
		return -EINVAL;

	dmac->cur = push->cur - (u32 *)dmac->_push.mem.object.map.ptr;
	if (dmac->cur + size >= dmac->max) {
		int ret = nv50_dmac_wind(dmac);
		if (ret)
			return ret;

		push->cur = dmac->_push.mem.object.map.ptr;
		push->cur = push->cur + dmac->cur;
		nv50_dmac_kick(push);
	}

	if (nvif_msec(dmac->base.device, 2000,
		if ((free = nv50_dmac_free(dmac)) >= size)
			break;
	) < 0) {
		WARN_ON(1);
		return -ETIMEDOUT;
	}

	push->bgn = dmac->_push.mem.object.map.ptr;
	push->bgn = push->bgn + dmac->cur;
	push->cur = push->bgn;
	push->end = push->cur + free;
	return 0;
}
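
/* Create a DMA EVO channel: allocate and map the 4KiB push buffer, hook up
 * the nvif_push wait/kick callbacks, create the channel object itself, and
 * (when a sync buffer is provided) the sync/VRAM DMA context objects.
 */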
int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
		 struct nv50_dmac *dmac)
{
	struct nouveau_cli *cli = (void *)device->object.client;
	struct nv50_disp_core_channel_dma_v0 *args = data;
	u8 type = NVIF_MEM_COHERENT;
	int ret;

	mutex_init(&dmac->lock);

	/* Pascal added support for 47-bit physical addresses, but some
	 * parts of EVO still only accept 40-bit PAs.
	 *
	 * To avoid issues on systems with large amounts of RAM, and on
	 * systems where an IOMMU maps pages at a high address, we need
	 * to allocate push buffers in VRAM instead.
	 *
	 * This appears to match NVIDIA's behaviour on Pascal.
	 */
	if (device->info.family == NV_DEVICE_INFO_V0_PASCAL)
		type |= NVIF_MEM_VRAM;

	ret = nvif_mem_ctor_map(&cli->mmu, "kmsChanPush", type, 0x1000,
				&dmac->_push.mem);
	if (ret)
		return ret;

	dmac->ptr = dmac->_push.mem.object.map.ptr;
	dmac->_push.wait = nv50_dmac_wait;
	dmac->_push.kick = nv50_dmac_kick;
	dmac->push = &dmac->_push;
	dmac->push->bgn = dmac->_push.mem.object.map.ptr;
	dmac->push->cur = dmac->push->bgn;
	dmac->push->end = dmac->push->bgn;
	dmac->max = 0x1000/4 - 1;

	/* EVO channels are affected by a HW bug where the last 12 DWORDs
	 * of the push buffer aren't able to be used safely.
	 */
	if (disp->oclass < GV100_DISP)
		dmac->max -= 12;

	args->pushbuf = nvif_handle(&dmac->_push.mem.object);

	ret = nv50_chan_create(device, disp, oclass, head, data, size,
			       &dmac->base);
	if (ret)
		return ret;

	if (!syncbuf)
		return 0;

	ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
			       NV_DMA_IN_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = syncbuf + 0x0000,
					.limit = syncbuf + 0x0fff,
			       }, sizeof(struct nv_dma_v0),
			       &dmac->sync);
	if (ret)
		return ret;

	ret = nvif_object_ctor(&dmac->base.user, "kmsVramCtxDma", NV50_DISP_HANDLE_VRAM,
			       NV_DMA_IN_MEMORY,
			       &(struct nv_dma_v0) {
					.target = NV_DMA_V0_TARGET_VRAM,
					.access = NV_DMA_V0_ACCESS_RDWR,
					.start = 0,
					.limit = device->info.ram_user - 1,
			       }, sizeof(struct nv_dma_v0),
			       &dmac->vram);
	if (ret)
		return ret;

	return ret;
}

/******************************************************************************
 * Output path helpers
 *****************************************************************************/
static void
nv50_outp_release(struct nouveau_encoder *nv_encoder)
{
	struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
	struct {
		struct nv50_disp_mthd_v1 base;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_RELEASE,
		.base.hasht = nv_encoder->dcb->hasht,
		.base.hashm = nv_encoder->dcb->hashm,
	};

	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
	nv_encoder->or = -1;
	nv_encoder->link = 0;
}

static int
nv50_outp_acquire(struct nouveau_encoder *nv_encoder, bool hda)
{
	struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_acquire_v0 info;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_ACQUIRE,
		.base.hasht = nv_encoder->dcb->hasht,
		.base.hashm = nv_encoder->dcb->hashm,
		.info.hda = hda,
	};
	int ret;

	ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
	if (ret) {
		NV_ERROR(drm, "error acquiring output path: %d\n", ret);
		return ret;
	}

	nv_encoder->or = args.info.or;
	nv_encoder->link = args.info.link;
	return 0;
}
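
/* Check whether the requested mode needs the panel scaler, and substitute the
 * connector's native mode for the adjusted mode when full scaling is used.
 */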
static int
nv50_outp_atomic_check_view(struct drm_encoder *encoder,
			    struct drm_crtc_state *crtc_state,
			    struct drm_connector_state *conn_state,
			    struct drm_display_mode *native_mode)
{
	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
	struct drm_display_mode *mode = &crtc_state->mode;
	struct drm_connector *connector = conn_state->connector;
	struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);

	NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
	asyc->scaler.full = false;
	if (!native_mode)
		return 0;

	if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
		switch (connector->connector_type) {
		case DRM_MODE_CONNECTOR_LVDS:
		case DRM_MODE_CONNECTOR_eDP:
			/* Don't force scaler for EDID modes with
			 * same size as the native one (e.g. different
			 * refresh rate)
			 */
			if (mode->hdisplay == native_mode->hdisplay &&
			    mode->vdisplay == native_mode->vdisplay &&
			    mode->type & DRM_MODE_TYPE_DRIVER)
				break;
			mode = native_mode;
			asyc->scaler.full = true;
			break;
		default:
			break;
		}
	} else {
		mode = native_mode;
	}

	if (!drm_mode_equal(adjusted_mode, mode)) {
		drm_mode_copy(adjusted_mode, mode);
		crtc_state->mode_changed = true;
	}

	return 0;
}

static int
nv50_outp_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	struct drm_connector *connector = conn_state->connector;
	struct nouveau_connector *nv_connector = nouveau_connector(connector);
	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
	int ret;

	ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
					  nv_connector->native_mode);
	if (ret)
		return ret;

	if (crtc_state->mode_changed || crtc_state->connectors_changed)
		asyh->or.bpc = connector->display_info.bpc;

	return 0;
}
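
/* Walk the atomic state to find the connector currently (or previously)
 * assigned to this encoder.
 */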
struct nouveau_connector *
nv50_outp_get_new_connector(struct nouveau_encoder *outp,
			    struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct drm_encoder *encoder = to_drm_encoder(outp);
	int i;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->best_encoder == encoder)
			return nouveau_connector(connector);
	}

	return NULL;
}

struct nouveau_connector *
nv50_outp_get_old_connector(struct nouveau_encoder *outp,
			    struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct drm_encoder *encoder = to_drm_encoder(outp);
	int i;

	for_each_old_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->best_encoder == encoder)
			return nouveau_connector(connector);
	}

	return NULL;
}

/******************************************************************************
 * DAC
 *****************************************************************************/
static void
nv50_dac_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_core *core = nv50_disp(encoder->dev)->core;
	const u32 ctrl = NVDEF(NV507D, DAC_SET_CONTROL, OWNER, NONE);
	if (nv_encoder->crtc)
		core->func->dac->ctrl(core, nv_encoder->or, ctrl, NULL);
	nv_encoder->crtc = NULL;
	nv50_outp_release(nv_encoder);
}

static void
nv50_dac_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
	struct nv50_core *core = nv50_disp(encoder->dev)->core;
	u32 ctrl = 0;

	switch (nv_crtc->index) {
	case 0: ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, OWNER, HEAD0); break;
	case 1: ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, OWNER, HEAD1); break;
	case 2: ctrl |= NVDEF(NV907D, DAC_SET_CONTROL, OWNER_MASK, HEAD2); break;
	case 3: ctrl |= NVDEF(NV907D, DAC_SET_CONTROL, OWNER_MASK, HEAD3); break;
	default:
		WARN_ON(1);
		break;
	}

	ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, PROTOCOL, RGB_CRT);

	nv50_outp_acquire(nv_encoder, false);

	core->func->dac->ctrl(core, nv_encoder->or, ctrl, asyh);
	asyh->or.depth = 0;

	nv_encoder->crtc = encoder->crtc;
}
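
/* Sense an analogue load on the DAC to decide whether a CRT is connected,
 * using the VBIOS-provided test value when one is available.
 */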
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_dac_load_v0 load;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_DAC_LOAD,
		.base.hasht = nv_encoder->dcb->hasht,
		.base.hashm = nv_encoder->dcb->hashm,
	};
	int ret;

	args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval;
	if (args.load.data == 0)
		args.load.data = 340;

	ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
	if (ret || !args.load.load)
		return connector_status_disconnected;

	return connector_status_connected;
}

static const struct drm_encoder_helper_funcs
nv50_dac_help = {
	.atomic_check = nv50_outp_atomic_check,
	.atomic_enable = nv50_dac_enable,
	.atomic_disable = nv50_dac_disable,
	.detect = nv50_dac_detect
};

static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs
nv50_dac_func = {
	.destroy = nv50_dac_destroy,
};

static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct nouveau_drm *drm = nouveau_drm(connector->dev);
	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
	struct nvkm_i2c_bus *bus;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;
	int type = DRM_MODE_ENCODER_DAC;

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;

	bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
	if (bus)
		nv_encoder->i2c = &bus->i2c;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
			 "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
	drm_encoder_helper_add(encoder, &nv50_dac_help);

	drm_connector_attach_encoder(connector, encoder);
	return 0;
}

/*
 * audio component binding for ELD notification
 */
static void
nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port,
				int dev_id)
{
	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
						 port, dev_id);
}

static int
nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
			     bool *enabled, unsigned char *buf, int max_bytes)
{
	struct drm_device *drm_dev = dev_get_drvdata(kdev);
	struct nouveau_drm *drm = nouveau_drm(drm_dev);
	struct drm_encoder *encoder;
	struct nouveau_encoder *nv_encoder;
	struct drm_connector *connector;
	struct nouveau_crtc *nv_crtc;
	struct drm_connector_list_iter conn_iter;
	int ret = 0;

	*enabled = false;

	drm_for_each_encoder(encoder, drm->dev) {
		struct nouveau_connector *nv_connector = NULL;

		nv_encoder = nouveau_encoder(encoder);

		drm_connector_list_iter_begin(drm_dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->state->best_encoder == encoder) {
				nv_connector = nouveau_connector(connector);
				break;
			}
		}
		drm_connector_list_iter_end(&conn_iter);
		if (!nv_connector)
			continue;

		nv_crtc = nouveau_crtc(encoder->crtc);
		if (!nv_crtc || nv_encoder->or != port ||
		    nv_crtc->index != dev_id)
			continue;
		*enabled = nv_encoder->audio;
		if (*enabled) {
			ret = drm_eld_size(nv_connector->base.eld);
			memcpy(buf, nv_connector->base.eld,
			       min(max_bytes, ret));
		}
		break;
	}

	return ret;
}

static const struct drm_audio_component_ops nv50_audio_component_ops = {
	.get_eld = nv50_audio_component_get_eld,
};

static int
nv50_audio_component_bind(struct device *kdev, struct device *hda_kdev,
			  void *data)
{
	struct drm_device *drm_dev = dev_get_drvdata(kdev);
	struct nouveau_drm *drm = nouveau_drm(drm_dev);
	struct drm_audio_component *acomp = data;

	if (WARN_ON(!device_link_add(hda_kdev, kdev, DL_FLAG_STATELESS)))
		return -ENOMEM;

	drm_modeset_lock_all(drm_dev);
	acomp->ops = &nv50_audio_component_ops;
	acomp->dev = kdev;
	drm->audio.component = acomp;
	drm_modeset_unlock_all(drm_dev);
	return 0;
}

static void
nv50_audio_component_unbind(struct device *kdev, struct device *hda_kdev,
			    void *data)
{
	struct drm_device *drm_dev = dev_get_drvdata(kdev);
	struct nouveau_drm *drm = nouveau_drm(drm_dev);
	struct drm_audio_component *acomp = data;

	drm_modeset_lock_all(drm_dev);
	drm->audio.component = NULL;
	acomp->ops = NULL;
	acomp->dev = NULL;
	drm_modeset_unlock_all(drm_dev);
}

static const struct component_ops nv50_audio_component_bind_ops = {
	.bind = nv50_audio_component_bind,
	.unbind = nv50_audio_component_unbind,
};

static void
nv50_audio_component_init(struct nouveau_drm *drm)
{
	if (!component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
		drm->audio.component_registered = true;
}

static void
nv50_audio_component_fini(struct nouveau_drm *drm)
{
	if (drm->audio.component_registered) {
		component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
		drm->audio.component_registered = false;
	}
}

/******************************************************************************
 * Audio
 *****************************************************************************/
static void
nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hda_eld_v0 eld;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
		.base.hasht = nv_encoder->dcb->hasht,
		.base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
			      (0x0100 << nv_crtc->index),
	};

	if (!nv_encoder->audio)
		return;

	nv_encoder->audio = false;
	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));

	nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
					nv_crtc->index);
}
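
/* Hand the connector's ELD to the display engine so audio can be enabled for
 * this head, then notify the HDA audio component of the new ELD.
 */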
static void
nv50_audio_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
		  struct drm_display_mode *mode)
{
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_connector *nv_connector;
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct __packed {
		struct {
			struct nv50_disp_mthd_v1 mthd;
			struct nv50_disp_sor_hda_eld_v0 eld;
		} base;
		u8 data[sizeof(nv_connector->base.eld)];
	} args = {
		.base.mthd.version = 1,
		.base.mthd.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
		.base.mthd.hasht = nv_encoder->dcb->hasht,
		.base.mthd.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
				   (0x0100 << nv_crtc->index),
	};

	nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
	if (!drm_detect_monitor_audio(nv_connector->edid))
		return;

	memcpy(args.data, nv_connector->base.eld, sizeof(args.data));

	nvif_mthd(&disp->disp->object, 0, &args,
		  sizeof(args.base) + drm_eld_size(args.data));
	nv_encoder->audio = true;

	nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
					nv_crtc->index);
}

/******************************************************************************
 * HDMI
 *****************************************************************************/
static void
nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht = nv_encoder->dcb->hasht,
		.base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
			      (0x0100 << nv_crtc->index),
	};

	nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
}
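
/* Power up HDMI for the new mode: pack the AVI and vendor infoframes, set the
 * rekey/max_ac_packet parameters, enable audio, and bring the sink's SCDC
 * scrambling configuration in line with what was programmed, if supported.
 */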
static void
nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_atomic_state *state,
		 struct drm_display_mode *mode)
{
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
		u8 infoframes[2 * 17]; /* two frames, up to 17 bytes each */
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht = nv_encoder->dcb->hasht,
		.base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
			      (0x0100 << nv_crtc->index),
		.pwr.state = 1,
		.pwr.rekey = 56, /* binary driver, and tegra, constant */
	};
	struct nouveau_connector *nv_connector;
	struct drm_hdmi_info *hdmi;
	u32 max_ac_packet;
	union hdmi_infoframe avi_frame;
	union hdmi_infoframe vendor_frame;
	bool high_tmds_clock_ratio = false, scrambling = false;
	u8 config;
	int ret;
	int size;

	nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
	if (!drm_detect_hdmi_monitor(nv_connector->edid))
		return;

	hdmi = &nv_connector->base.display_info.hdmi;

	ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi,
						       &nv_connector->base, mode);
	if (!ret) {
		/* We have an AVI InfoFrame, populate it to the display */
		args.pwr.avi_infoframe_length
			= hdmi_infoframe_pack(&avi_frame, args.infoframes, 17);
	}

	ret = drm_hdmi_vendor_infoframe_from_display_mode(&vendor_frame.vendor.hdmi,
							  &nv_connector->base, mode);
	if (!ret) {
		/* We have a Vendor InfoFrame, populate it to the display */
		args.pwr.vendor_infoframe_length
			= hdmi_infoframe_pack(&vendor_frame,
					      args.infoframes
					      + args.pwr.avi_infoframe_length,
					      17);
	}

	max_ac_packet = mode->htotal - mode->hdisplay;
	max_ac_packet -= args.pwr.rekey;
	max_ac_packet -= 18; /* constant from tegra */
	args.pwr.max_ac_packet = max_ac_packet / 32;

	if (hdmi->scdc.scrambling.supported) {
		high_tmds_clock_ratio = mode->clock > 340000;
		scrambling = high_tmds_clock_ratio ||
			hdmi->scdc.scrambling.low_rates;
	}

	args.pwr.scdc =
		NV50_DISP_SOR_HDMI_PWR_V0_SCDC_SCRAMBLE * scrambling |
		NV50_DISP_SOR_HDMI_PWR_V0_SCDC_DIV_BY_4 * high_tmds_clock_ratio;

	size = sizeof(args.base)
		+ sizeof(args.pwr)
		+ args.pwr.avi_infoframe_length
		+ args.pwr.vendor_infoframe_length;
	nvif_mthd(&disp->disp->object, 0, &args, size);

	nv50_audio_enable(encoder, state, mode);

	/* If SCDC is supported by the downstream monitor, update
	 * divider / scrambling settings to what we programmed above.
	 */
	if (!hdmi->scdc.scrambling.supported)
		return;

	ret = drm_scdc_readb(nv_encoder->i2c, SCDC_TMDS_CONFIG, &config);
	if (ret < 0) {
		NV_ERROR(drm, "Failure to read SCDC_TMDS_CONFIG: %d\n", ret);
		return;
	}
	config &= ~(SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 | SCDC_SCRAMBLING_ENABLE);
	config |= SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 * high_tmds_clock_ratio;
	config |= SCDC_SCRAMBLING_ENABLE * scrambling;
	ret = drm_scdc_writeb(nv_encoder->i2c, SCDC_TMDS_CONFIG, config);
	if (ret < 0)
		NV_ERROR(drm, "Failure to write SCDC_TMDS_CONFIG = 0x%02x: %d\n",
			 config, ret);
}

/******************************************************************************
 * MST
 *****************************************************************************/
#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)

struct nv50_mstc {
	struct nv50_mstm *mstm;
	struct drm_dp_mst_port *port;
	struct drm_connector connector;

	struct drm_display_mode *native;
	struct edid *edid;
};

struct nv50_msto {
	struct drm_encoder encoder;

	struct nv50_head *head;
	struct nv50_mstc *mstc;
	bool disabled;
};

struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
{
	struct nv50_msto *msto;

	if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST)
		return nouveau_encoder(encoder);

	msto = nv50_msto(encoder);
	if (!msto->mstc)
		return NULL;
	return msto->mstc->mstm->outp;
}

static struct drm_dp_payload *
nv50_msto_payload(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;
	int vcpi = mstc->port->vcpi.vcpi, i;

	WARN_ON(!mutex_is_locked(&mstm->mgr.payload_lock));

	NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
	for (i = 0; i < mstm->mgr.max_payloads; i++) {
		struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
		NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
			  mstm->outp->base.base.name, i, payload->vcpi,
			  payload->start_slot, payload->num_slots);
	}

	for (i = 0; i < mstm->mgr.max_payloads; i++) {
		struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
		if (payload->vcpi == vcpi)
			return payload;
	}

	return NULL;
}

static void
nv50_msto_cleanup(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;

	if (!msto->disabled)
		return;

	NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);

	drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);

	msto->mstc = NULL;
	msto->disabled = false;
}

static void
nv50_msto_prepare(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
		.base.hasht = mstm->outp->dcb->hasht,
		.base.hashm = (0xf0ff & mstm->outp->dcb->hashm) |
			      (0x0100 << msto->head->base.index),
	};

	mutex_lock(&mstm->mgr.payload_lock);

	NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
	if (mstc->port->vcpi.vcpi > 0) {
		struct drm_dp_payload *payload = nv50_msto_payload(msto);
		if (payload) {
			args.vcpi.start_slot = payload->start_slot;
			args.vcpi.num_slots = payload->num_slots;
			args.vcpi.pbn = mstc->port->vcpi.pbn;
			args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
		}
	}

	NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
		  msto->encoder.name, msto->head->base.base.name,
		  args.vcpi.start_slot, args.vcpi.num_slots,
		  args.vcpi.pbn, args.vcpi.aligned_pbn);

	nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
	mutex_unlock(&mstm->mgr.payload_lock);
}

static int
nv50_msto_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_connector *connector = conn_state->connector;
	struct nv50_mstc *mstc = nv50_mstc(connector);
	struct nv50_mstm *mstm = mstc->mstm;
	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
	int slots;
	int ret;

	ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
					  mstc->native);
	if (ret)
		return ret;

	if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
		return 0;

	/*
	 * When restoring duplicated states, we need to make sure that the bw
	 * remains the same and avoid recalculating it, as the connector's bpc
	 * may have changed after the state was duplicated
	 */
	if (!state->duplicated) {
		const int clock = crtc_state->adjusted_mode.clock;

		asyh->or.bpc = connector->display_info.bpc;
		asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3,
						    false);
	}

	slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
					      asyh->dp.pbn, 0);
	if (slots < 0)
		return slots;

	asyh->dp.tu = slots;

	return 0;
}
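
/* Map a per-head bits-per-component value onto the SOR pixel depth field used
 * by the display class methods.
 */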
static u8
nv50_dp_bpc_to_depth(unsigned int bpc)
{
	switch (bpc) {
	case 6: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444;
	case 8: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444;
	case 10:
	default: return NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444;
	}
}
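
/* Find the MST connector driving this fake encoder, allocate its VCPI, and
 * program the real SOR with the DisplayPort protocol and pixel depth for the
 * head being enabled.
 */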
static void
nv50_msto_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nv50_head *head = nv50_head(encoder->crtc);
	struct nv50_head_atom *armh = nv50_head_atom(head->base.base.state);
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_mstc *mstc = NULL;
	struct nv50_mstm *mstm = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 proto;
	bool r;

	drm_connector_list_iter_begin(encoder->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->best_encoder == &msto->encoder) {
			mstc = nv50_mstc(connector);
			mstm = mstc->mstm;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (WARN_ON(!mstc))
		return;

	r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, armh->dp.pbn,
				     armh->dp.tu);
	if (!r)
		DRM_DEBUG_KMS("Failed to allocate VCPI\n");

	if (!mstm->links++)
		nv50_outp_acquire(mstm->outp, false /*XXX: MST audio.*/);

	if (mstm->outp->link & 1)
		proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_A;
	else
		proto = NV917D_SOR_SET_CONTROL_PROTOCOL_DP_B;

	mstm->outp->update(mstm->outp, head->base.index, armh, proto,
			   nv50_dp_bpc_to_depth(armh->or.bpc));

	msto->mstc = mstc;
	mstm->modified = true;
}

static void
nv50_msto_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;

	drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);

	mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
	mstm->modified = true;
	if (!--mstm->links)
		mstm->disabled = true;
	msto->disabled = true;
}

static const struct drm_encoder_helper_funcs
nv50_msto_help = {
	.atomic_disable = nv50_msto_disable,
	.atomic_enable = nv50_msto_enable,
	.atomic_check = nv50_msto_atomic_check,
};

static void
nv50_msto_destroy(struct drm_encoder *encoder)
{
	struct nv50_msto *msto = nv50_msto(encoder);
	drm_encoder_cleanup(&msto->encoder);
	kfree(msto);
}

static const struct drm_encoder_funcs
nv50_msto = {
	.destroy = nv50_msto_destroy,
};
drm/nouveau/kms/nv50-: Use less encoders by making mstos per-head
Currently, for every single MST capable DRM connector we create a set of
fake encoders, one for each possible head. Unfortunately this ends up
being a huge waste of encoders. While this currently isn't causing us
any problems, it's extremely close to doing so.
The ThinkPad P71 is a good example of this. Originally when trying to
figure out why nouveau was failing to load on this laptop, I discovered
it was because nouveau was creating too many encoders. This ended up
being because we were mistakenly creating MST encoders for the eDP port;
however, we are still extremely close to hitting the encoder limit on
this machine as it exposes 1 eDP port and 5 DP ports, resulting in 31
encoders.
So while this fix didn't end up being necessary to fix the P71, we still
need to implement this so that we avoid hitting the encoder limit for
valid display configurations in the event that some machine with more
connectors than this becomes available. Plus, we don't want to let good
code go to waste :)
So, use less encoders by only creating one MSTO per head. Then, attach
each new MSTC to each MSTO which corresponds to a head that its parent
DP port is capable of using. This brings the number of encoders we
register on the ThinkPad P71 from 31, down to just 15. Yay!
Signed-off-by: Lyude Paul <lyude@redhat.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
2019-09-13 18:03:52 -04:00
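To make the 31 → 15 figure above concrete (a hedged reconstruction — the per-head breakdown below is inferred from the numbers in the message, not stated in it): with four heads, the old scheme creates one fake MST encoder per head for each of the five DP connectors, 5 * 4 = 20 fake encoders on top of roughly 11 real ones, about 31 in total; with a single MSTO per head only 4 fake encoders are needed, so roughly 11 + 4 = 15 in total.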
|
|
|
static struct nv50_msto *
|
|
|
|
nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id)
|
2016-11-04 17:20:36 +10:00
|
|
|
{
|
|
|
|
struct nv50_msto *msto;
|
|
|
|
int ret;
|
|
|
|
|
2019-09-13 18:03:52 -04:00
|
|
|
msto = kzalloc(sizeof(*msto), GFP_KERNEL);
|
|
|
|
if (!msto)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
2016-11-04 17:20:36 +10:00
|
|
|
|
|
|
|
ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
|
2019-09-13 18:03:52 -04:00
|
|
|
DRM_MODE_ENCODER_DPMST, "mst-%d", id);
|
2016-11-04 17:20:36 +10:00
|
|
|
if (ret) {
|
2019-09-13 18:03:52 -04:00
|
|
|
kfree(msto);
|
|
|
|
return ERR_PTR(ret);
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
|
2019-09-13 18:03:52 -04:00
|
|
|
msto->encoder.possible_crtcs = drm_crtc_mask(&head->base.base);
|
|
|
|
msto->head = head;
|
|
|
|
return msto;
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct drm_encoder *
|
|
|
|
nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
|
drm: Pass the full state to connectors atomic functions
The current atomic helpers have either their object state being passed as
an argument or the full atomic state.
The former is the pattern that was done at first, before switching to the
latter for new hooks or when it was needed.
Now that the CRTCs have been converted, let's move forward with the
connectors to provide a consistent interface.
The conversion was done using the coccinelle script below, and build-tested
on all the drivers.
@@
identifier connector, connector_state;
@@
struct drm_connector_helper_funcs {
...
struct drm_encoder* (*atomic_best_encoder)(struct drm_connector *connector,
- struct drm_connector_state *connector_state);
+ struct drm_atomic_state *state);
...
}
@@
identifier connector, connector_state;
@@
struct drm_connector_helper_funcs {
...
void (*atomic_commit)(struct drm_connector *connector,
- struct drm_connector_state *connector_state);
+ struct drm_atomic_state *state);
...
}
@@
struct drm_connector_helper_funcs *FUNCS;
identifier state;
identifier connector, connector_state;
identifier f;
@@
f(..., struct drm_atomic_state *state, ...)
{
<+...
- FUNCS->atomic_commit(connector, connector_state);
+ FUNCS->atomic_commit(connector, state);
...+>
}
@@
struct drm_connector_helper_funcs *FUNCS;
identifier state;
identifier connector, connector_state;
identifier var, f;
@@
f(struct drm_atomic_state *state, ...)
{
<+...
- var = FUNCS->atomic_best_encoder(connector, connector_state);
+ var = FUNCS->atomic_best_encoder(connector, state);
...+>
}
@ connector_atomic_func @
identifier helpers;
identifier func;
@@
(
static struct drm_connector_helper_funcs helpers = {
...,
.atomic_best_encoder = func,
...,
};
|
static struct drm_connector_helper_funcs helpers = {
...,
.atomic_commit = func,
...,
};
)
@@
identifier connector_atomic_func.func;
identifier connector;
symbol state;
@@
func(struct drm_connector *connector,
- struct drm_connector_state *state
+ struct drm_connector_state *connector_state
)
{
...
- state
+ connector_state
...
}
@ ignores_state @
identifier connector_atomic_func.func;
identifier connector, connector_state;
@@
func(struct drm_connector *connector,
struct drm_connector_state *connector_state)
{
... when != connector_state
}
@ adds_state depends on connector_atomic_func && !ignores_state @
identifier connector_atomic_func.func;
identifier connector, connector_state;
@@
func(struct drm_connector *connector, struct drm_connector_state *connector_state)
{
+ struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, connector);
...
}
@ depends on connector_atomic_func @
identifier connector_atomic_func.func;
identifier connector_state;
identifier connector;
@@
func(struct drm_connector *connector,
- struct drm_connector_state *connector_state
+ struct drm_atomic_state *state
)
{ ... }
@ include depends on adds_state @
@@
#include <drm/drm_atomic.h>
@ no_include depends on !include && adds_state @
@@
+ #include <drm/drm_atomic.h>
#include <drm/...>
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Reviewed-by: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Cc: Leo Li <sunpeng.li@amd.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Ben Skeggs <bskeggs@redhat.com>
Cc: Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
Cc: Melissa Wen <melissa.srw@gmail.com>
Cc: Haneen Mohammed <hamohammed.sa@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201118094758.506730-1-maxime@cerno.tech
2020-11-18 10:47:58 +01:00
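In driver terms, the semantic patch above rewrites connector hooks from the first shape below to the second; the foo_* functions are hypothetical placeholders for illustration (nouveau's real converted hook follows immediately after this), and the encoder-selection logic is deliberately elided.

/* Before the conversion: the hook received only its own connector state. */
static struct drm_encoder *
foo_atomic_best_encoder_old(struct drm_connector *connector,
			    struct drm_connector_state *connector_state)
{
	/* ... pick an encoder based on connector_state->crtc ... */
	return NULL;
}

/* After: the hook receives the full atomic state and re-derives the piece
 * of state it needs, keeping the signature consistent with the CRTC hooks. */
static struct drm_encoder *
foo_atomic_best_encoder(struct drm_connector *connector,
			struct drm_atomic_state *state)
{
	struct drm_connector_state *connector_state =
		drm_atomic_get_new_connector_state(state, connector);

	/* ... same selection logic, still based on connector_state->crtc ... */
	return NULL;
}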
|
|
|
struct drm_atomic_state *state)
|
2016-11-04 17:20:36 +10:00
|
|
|
{
|
2020-11-18 10:47:58 +01:00
|
|
|
struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
|
|
|
|
connector);
|
2016-11-04 17:20:36 +10:00
|
|
|
struct nv50_mstc *mstc = nv50_mstc(connector);
|
2019-09-13 18:03:52 -04:00
|
|
|
struct drm_crtc *crtc = connector_state->crtc;
|
2018-10-08 19:24:31 -04:00
|
|
|
|
2019-09-13 18:03:52 -04:00
|
|
|
if (!(mstc->mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return &nv50_head(crtc)->msto->encoder;
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
static enum drm_mode_status
|
|
|
|
nv50_mstc_mode_valid(struct drm_connector *connector,
|
|
|
|
struct drm_display_mode *mode)
|
|
|
|
{
|
2020-05-11 18:41:27 -04:00
|
|
|
struct nv50_mstc *mstc = nv50_mstc(connector);
|
|
|
|
struct nouveau_encoder *outp = mstc->mstm->outp;
|
|
|
|
|
|
|
|
/* TODO: calculate the PBN from the dotclock and validate against the
|
|
|
|
* MSTB's max possible PBN
|
|
|
|
*/
|
|
|
|
|
|
|
|
return nv50_dp_mode_valid(connector, outp, mode, NULL);
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nv50_mstc_get_modes(struct drm_connector *connector)
|
|
|
|
{
|
|
|
|
struct nv50_mstc *mstc = nv50_mstc(connector);
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
|
2018-07-09 10:40:06 +02:00
|
|
|
drm_connector_update_edid_property(&mstc->connector, mstc->edid);
|
2017-11-01 16:21:02 +02:00
|
|
|
if (mstc->edid)
|
2016-11-04 17:20:36 +10:00
|
|
|
ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
|
|
|
|
|
2020-05-11 18:41:26 -04:00
|
|
|
/*
|
|
|
|
* XXX: Since we don't use HDR in userspace quite yet, limit the bpc
|
|
|
|
* to 8 to save bandwidth on the topology. In the future, we'll want
|
|
|
|
* to properly fix this by dynamically selecting the highest possible
|
|
|
|
* bpc that would fit in the topology
|
|
|
|
*/
|
|
|
|
if (connector->display_info.bpc)
|
|
|
|
connector->display_info.bpc =
|
|
|
|
clamp(connector->display_info.bpc, 6U, 8U);
|
|
|
|
else
|
|
|
|
connector->display_info.bpc = 8;
|
2016-11-04 17:20:36 +10:00
|
|
|
|
|
|
|
if (mstc->native)
|
|
|
|
drm_mode_destroy(mstc->connector.dev, mstc->native);
|
|
|
|
mstc->native = nouveau_conn_native_mode(&mstc->connector);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-01-10 19:53:43 -05:00
|
|
|
static int
|
|
|
|
nv50_mstc_atomic_check(struct drm_connector *connector,
|
2019-06-11 12:08:18 -04:00
|
|
|
struct drm_atomic_state *state)
|
2019-01-10 19:53:43 -05:00
|
|
|
{
|
|
|
|
struct nv50_mstc *mstc = nv50_mstc(connector);
|
|
|
|
struct drm_dp_mst_topology_mgr *mgr = &mstc->mstm->mgr;
|
2019-06-11 12:08:18 -04:00
|
|
|
struct drm_connector_state *new_conn_state =
|
|
|
|
drm_atomic_get_new_connector_state(state, connector);
|
2019-01-10 19:53:43 -05:00
|
|
|
struct drm_connector_state *old_conn_state =
|
|
|
|
drm_atomic_get_old_connector_state(state, connector);
|
|
|
|
struct drm_crtc_state *crtc_state;
|
|
|
|
struct drm_crtc *new_crtc = new_conn_state->crtc;
|
|
|
|
|
|
|
|
if (!old_conn_state->crtc)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* We only want to free VCPI if this state disables the CRTC on this
|
|
|
|
* connector
|
|
|
|
*/
|
|
|
|
if (new_crtc) {
|
|
|
|
crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
|
|
|
|
|
|
|
|
if (!crtc_state ||
|
|
|
|
!drm_atomic_crtc_needs_modeset(crtc_state) ||
|
|
|
|
crtc_state->enable)
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port);
|
|
|
|
}
|
|
|
|
|
drm/dp_mst: Protect drm_dp_mst_port members with locking
This is a complicated one. Essentially, there's currently a problem in the MST
core that hasn't really caused any issues that we're aware of (emphasis on "that
we're aware of"): locking.
When we go through and probe the link addresses and path resources in a
topology, we hold no locks when updating ports with said information. The
members I'm referring to in particular are:
- ldps
- ddps
- mcs
- pdt
- dpcd_rev
- num_sdp_streams
- num_sdp_stream_sinks
- available_pbn
- input
- connector
Now that we're handling UP requests asynchronously and will be using some of
the struct members mentioned above in atomic modesetting in the future for
features such as PBN validation, this is going to become a lot more important.
As well, the next few commits that prepare us for and introduce suspend/resume
reprobing will also need clear locking in order to prevent additional
racing hilarities that we never could have hit in the past.
So, let's solve this issue by using &mgr->base.lock, the modesetting
lock which currently only protects &mgr->base.state. This works
perfectly because it allows us to avoid blocking connection_mutex
unnecessarily, and we can grab this in connector detection paths since
it's a ww mutex. We start by having drm_dp_mst_handle_up_req() hold this
when updating ports. For drm_dp_mst_handle_link_address_port() things
are a bit more complicated. As I've learned the hard way, we can grab
&mgr->base.lock for everything except for port->connector. See, our
normal driver probing paths end up generating this rather obvious
lockdep chain:
&drm->mode_config.mutex
-> crtc_ww_class_mutex/crtc_ww_class_acquire
-> &connector->mutex
However, sysfs grabs &drm->mode_config.mutex in order to protect itself
from connector state changing under it. Because this entails grabbing
kn->count, i.e. the lock that the kernel provides for protecting sysfs
contexts, we end up grabbing kn->count followed by
&drm->mode_config.mutex. This ends up creating an extremely rude chain:
&kn->count
-> &drm->mode_config.mutex
-> crtc_ww_class_mutex/crtc_ww_class_acquire
-> &connector->mutex
I mean, look at that thing! It's just evil!!! This gross thing ends up
making any calls to drm_connector_register()/drm_connector_unregister()
impossible when holding any kind of modesetting lock. This is annoying
because ideally, we always want to ensure that
drm_dp_mst_port->connector never changes when doing an atomic commit or
check that would affect the atomic topology state so that it can
reliably and easily be used from future DRM DP MST helpers to assist
with tasks such as scanning through the current VCPI allocations and
adding connectors which need to have their allocations updated in
response to a bandwidth change or the like.
Being able to hold &mgr->base.lock throughout the entire link probe
process would have been _great_, since we could prevent userspace from
ever seeing any states in-between individual port changes and as a
result likely end up with a much faster probe and more consistent
results from said probes. But without some rework of how we handle
connector probing in sysfs it's not at all currently possible. In the
future, maybe we can try using the sysfs locks to protect updates to
connector probing state and fix this mess.
So for now, to protect everything other than port->connector under
&mgr->base.lock and ensure that we still have the guarantee that atomic
check/commit contexts will never see port->connector change we use a
silly trick. See: port->connector only needs to change in order to
ensure that input ports (see the MST spec) never have a ghost connector
associated with them. But, there's nothing stopping us from simply
throwing the entire port out and creating a new one in order to maintain
that requirement while still keeping port->connector consistent across
the lifetime of the port in atomic check/commit contexts. For all
intents and purposes this works fine, as we validate ports in any contexts
we care about before using them and as such will end up reporting the
connector as disconnected until its port's destruction finalizes. So,
we just do that in cases where we detect port->input has transitioned
from true->false. We don't need to worry about the other direction,
since a port without a connector isn't visible to userspace and as such
doesn't need to be protected by &mgr->base.lock until we finish
registering a connector for it.
For updating members of drm_dp_mst_port other than port->connector, we
simply grab &mgr->base.lock in drm_dp_mst_link_probe_work() for already
registered ports, update said members and drop the lock before
potentially registering a connector and probing the link address of its
children.
Finally, we modify drm_dp_mst_detect_port() to take a modesetting lock
acquisition context in order to acquire &mgr->base.lock under
&connection_mutex and convert all its users over to using the
.detect_ctx probe hooks.
With that, we finally have well defined locking.
Changes since v4:
* Get rid of port->mutex, stop using connection_mutex and just use our own
modesetting lock - mgr->base.lock. Also, add a probe_lock that comes
before this patch.
* Just throw out ports that get changed from an output to an input, and
replace them with new ports. This lets us ensure that modesetting
contexts never see port->connector go from having a connector to being
NULL.
* Write an extremely detailed explanation of what problems this is
trying to fix, since there's a _lot_ of context here and I honestly
forgot some of it myself a couple times.
* Don't grab mgr->lock when reading port->mstb in
drm_dp_mst_handle_link_address_port(). It's not needed.
Cc: Juston Li <juston.li@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Harry Wentland <hwentlan@amd.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Sean Paul <sean@poorly.run>
Signed-off-by: Lyude Paul <lyude@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191022023641.8026-7-lyude@redhat.com
2019-06-17 17:59:29 -04:00
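To illustrate the .detect_ctx pattern this commit converts users to, here is a minimal sketch under stated assumptions: example_detect_ctx is hypothetical, it takes mgr and port as explicit parameters for brevity rather than looking them up from the connector, and the real locking is performed inside drm_dp_mst_detect_port(), as nouveau's nv50_mstc_detect() further below shows. The probe helpers hand the hook a drm_modeset_acquire_ctx, so it can take &mgr->base.lock like any other modeset lock and back off cleanly on -EDEADLK; locks taken through the context are released by the caller when the context is finished.

#include <drm/drm_connector.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_modeset_lock.h>

static int example_detect_ctx(struct drm_connector *connector,
			      struct drm_modeset_acquire_ctx *ctx,
			      struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_mst_port *port, bool force)
{
	int ret;

	/* May return -EDEADLK; the probe helpers back off and retry. */
	ret = drm_modeset_lock(&mgr->base.lock, ctx);
	if (ret)
		return ret;

	/* port->ddps, port->pdt, etc. can now be read consistently. */
	ret = port->ddps ? connector_status_connected
			 : connector_status_disconnected;

	/* No unlock here: ctx-held locks are dropped by the caller. */
	return ret;
}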
|
|
|
static int
|
|
|
|
nv50_mstc_detect(struct drm_connector *connector,
|
|
|
|
struct drm_modeset_acquire_ctx *ctx, bool force)
|
2016-11-04 17:20:36 +10:00
|
|
|
{
|
|
|
|
struct nv50_mstc *mstc = nv50_mstc(connector);
|
2018-09-14 16:44:03 -04:00
|
|
|
int ret;
|
|
|
|
|
2019-01-10 19:53:38 -05:00
|
|
|
if (drm_connector_is_unregistered(connector))
|
2016-11-04 17:20:36 +10:00
|
|
|
return connector_status_disconnected;
|
2018-09-14 16:44:03 -04:00
|
|
|
|
|
|
|
ret = pm_runtime_get_sync(connector->dev->dev);
|
2020-05-20 18:47:48 +08:00
|
|
|
if (ret < 0 && ret != -EACCES) {
|
|
|
|
pm_runtime_put_autosuspend(connector->dev->dev);
|
2018-09-14 16:44:03 -04:00
|
|
|
return connector_status_disconnected;
|
2020-05-20 18:47:48 +08:00
|
|
|
}
|
2018-09-14 16:44:03 -04:00
|
|
|
|
2019-06-17 17:59:29 -04:00
|
|
|
ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
|
|
|
|
mstc->port);
|
2020-08-26 14:24:50 -04:00
|
|
|
if (ret != connector_status_connected)
|
|
|
|
goto out;
|
2018-09-14 16:44:03 -04:00
|
|
|
|
2020-08-26 14:24:50 -04:00
|
|
|
out:
|
2018-09-14 16:44:03 -04:00
|
|
|
pm_runtime_mark_last_busy(connector->dev->dev);
|
|
|
|
pm_runtime_put_autosuspend(connector->dev->dev);
|
2019-06-17 17:59:29 -04:00
|
|
|
return ret;
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
2019-06-17 17:59:29 -04:00
|
|
|
static const struct drm_connector_helper_funcs
|
|
|
|
nv50_mstc_help = {
|
|
|
|
.get_modes = nv50_mstc_get_modes,
|
|
|
|
.mode_valid = nv50_mstc_mode_valid,
|
|
|
|
.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
|
|
|
|
.atomic_check = nv50_mstc_atomic_check,
|
|
|
|
.detect_ctx = nv50_mstc_detect,
|
|
|
|
};
|
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
static void
|
|
|
|
nv50_mstc_destroy(struct drm_connector *connector)
|
|
|
|
{
|
|
|
|
struct nv50_mstc *mstc = nv50_mstc(connector);
|
2019-01-10 19:53:37 -05:00
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
drm_connector_cleanup(&mstc->connector);
|
2019-01-10 19:53:38 -05:00
|
|
|
drm_dp_mst_put_port_malloc(mstc->port);
|
2019-01-10 19:53:37 -05:00
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
kfree(mstc);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct drm_connector_funcs
|
|
|
|
nv50_mstc = {
|
|
|
|
.reset = nouveau_conn_reset,
|
|
|
|
.fill_modes = drm_helper_probe_single_connector_modes,
|
|
|
|
.destroy = nv50_mstc_destroy,
|
|
|
|
.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
|
|
|
|
.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
|
|
|
|
.atomic_set_property = nouveau_conn_atomic_set_property,
|
|
|
|
.atomic_get_property = nouveau_conn_atomic_get_property,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int
|
|
|
|
nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
|
|
|
|
const char *path, struct nv50_mstc **pmstc)
|
|
|
|
{
|
|
|
|
struct drm_device *dev = mstm->outp->base.base.dev;
|
2019-09-13 18:03:52 -04:00
|
|
|
struct drm_crtc *crtc;
|
2016-11-04 17:20:36 +10:00
|
|
|
struct nv50_mstc *mstc;
|
2019-09-13 18:03:52 -04:00
|
|
|
int ret;
|
2016-11-04 17:20:36 +10:00
|
|
|
|
|
|
|
if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
|
|
|
|
return -ENOMEM;
|
|
|
|
mstc->mstm = mstm;
|
|
|
|
mstc->port = port;
|
|
|
|
|
|
|
|
ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
|
|
|
|
DRM_MODE_CONNECTOR_DisplayPort);
|
|
|
|
if (ret) {
|
|
|
|
kfree(*pmstc);
|
|
|
|
*pmstc = NULL;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);
|
|
|
|
|
|
|
|
mstc->connector.funcs->reset(&mstc->connector);
|
|
|
|
nouveau_conn_attach_properties(&mstc->connector);
|
|
|
|
|
2019-09-13 18:03:52 -04:00
|
|
|
drm_for_each_crtc(crtc, dev) {
|
|
|
|
if (!(mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
drm_connector_attach_encoder(&mstc->connector,
|
|
|
|
&nv50_head(crtc)->msto->encoder);
|
|
|
|
}
|
2016-11-04 17:20:36 +10:00
|
|
|
|
|
|
|
drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
|
|
|
|
drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
|
2018-07-09 10:40:08 +02:00
|
|
|
drm_connector_set_path_property(&mstc->connector, path);
|
2019-01-10 19:53:37 -05:00
|
|
|
drm_dp_mst_get_port_malloc(port);
|
2016-11-04 17:20:36 +10:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nv50_mstm_cleanup(struct nv50_mstm *mstm)
|
|
|
|
{
|
|
|
|
struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
|
|
|
|
struct drm_encoder *encoder;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
|
|
|
|
ret = drm_dp_check_act_status(&mstm->mgr);
|
|
|
|
|
|
|
|
ret = drm_dp_update_payload_part2(&mstm->mgr);
|
|
|
|
|
|
|
|
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
|
|
|
|
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
|
|
|
|
struct nv50_msto *msto = nv50_msto(encoder);
|
|
|
|
struct nv50_mstc *mstc = msto->mstc;
|
|
|
|
if (mstc && mstc->mstm == mstm)
|
|
|
|
nv50_msto_cleanup(msto);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
mstm->modified = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nv50_mstm_prepare(struct nv50_mstm *mstm)
|
|
|
|
{
|
|
|
|
struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
|
|
|
|
struct drm_encoder *encoder;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
|
|
|
|
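	/* Payload table update, part 1: program the new VC payload allocations before each MSTO is prepared below. */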
ret = drm_dp_update_payload_part1(&mstm->mgr);
|
|
|
|
|
|
|
|
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
|
|
|
|
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
|
|
|
|
struct nv50_msto *msto = nv50_msto(encoder);
|
|
|
|
struct nv50_mstc *mstc = msto->mstc;
|
|
|
|
if (mstc && mstc->mstm == mstm)
|
|
|
|
nv50_msto_prepare(msto);
|
|
|
|
}
|
|
|
|
}
|
2017-05-19 23:59:35 +10:00
|
|
|
|
|
|
|
if (mstm->disabled) {
|
|
|
|
if (!mstm->links)
|
|
|
|
nv50_outp_release(mstm->outp);
|
|
|
|
mstm->disabled = false;
|
|
|
|
}
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct drm_connector *
|
|
|
|
nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
|
|
|
|
struct drm_dp_mst_port *port, const char *path)
|
|
|
|
{
|
|
|
|
struct nv50_mstm *mstm = nv50_mstm(mgr);
|
|
|
|
struct nv50_mstc *mstc;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = nv50_mstc_new(mstm, port, path, &mstc);
|
2019-01-10 19:53:35 -05:00
|
|
|
if (ret)
|
2016-11-04 17:20:36 +10:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
return &mstc->connector;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct drm_dp_mst_topology_cbs
|
|
|
|
nv50_mstm = {
|
|
|
|
.add_connector = nv50_mstm_add_connector,
|
|
|
|
};
|
|
|
|
|
drm/nouveau/kms/nv50-: Refactor and cleanup DP HPD handling
First some backstory here: Currently, we keep track of whether or not
we've enabled MST by trying to piggy-back off the MST helpers.
This means that in order to check whether MST is enabled or not, we
actually need to grab drm_dp_mst_topology_mgr.lock.
Back when I originally wrote this, I did this piggy-backing with the
intention that I'd eventually be teaching our MST helpers how to recover
when an MST device has stopped responding, which in turn would require
the MST helpers having a way of disabling MST independently of the
driver. Note that this was before I reworked locking in the MST helpers,
so at the time we were sticking random things under &mgr->lock - which
grabbing this lock was meant to protect against.
This never came to fruition because doing such a reset safely turned out
to be a lot more painful (and closer to impossible) than it sounds, and also just
risks us working around issues with our MST handlers that should be
properly fixed instead. Even if it did though, simply calling
drm_dp_mst_topology_mgr_set_mst() from the MST helpers (with the
exception of when we're tearing down our MST managers, that's always OK)
wouldn't have been a bad idea, since drivers like nouveau and i915 need
to do their own book keeping immediately after disabling MST.
So, implementing that would likely require adding a hook for
helper-triggered MST disables anyway.
So, fast forward to now - we want to start adding support for all of the
miscellaneous bits of the DP protocol (for both SST and MST) we're
missing before moving on to supporting more complicated features like
supporting different BPP values on MST, DSC, etc. Since many of these
features only exist on SST and make use of DP HPD IRQs, we want to be
able to atomically check whether we're servicing an MST IRQ or SST IRQ
in nouveau_connector_hotplug(). Currently we literally don't do this at
all, and just handle any kind of possible DP IRQ we could get including
ESIs - even if MST isn't actually enabled.
This would be very complicated and difficult to fix if we need to hold
&mgr->lock while handling SST IRQs to ensure that the MST topology
state doesn't change under us. What we really want here is to do our own
tracking of whether MST is enabled or not, similar to drivers like i915,
and define our own locking order to decomplicate things and avoid
hitting locking issues in the future.
So, let's do this by refactoring our MST probing/enabling code to use
our own MST bookkeeping, along with adding a lock for protecting DP
state that needs to be checked outside of our connector probing
functions. While we're at it, we also remove a bunch of unneeded steps
we perform when probing/enabling MST:
* Enabling bits in MSTM_CTRL before calling drm_dp_mst_topology_mgr_set_mst().
I don't think these ever actually did anything, since the nvif methods
for enabling MST don't actually do anything DPCD related and merely
indicate to nvkm that we've turned on MST.
* Checking the MSTM_CTRL bit is intact when checking the state of an
enabled MST topology in nv50_mstm_detect(). I just added this to be safe
originally, but now that we try reading the DPCD when probing DP
connectors it shouldn't be needed as that will abort our hotplug probing
if the device was removed well before we start checking for MST.
* All of the duplicate DPCD version checks.
This leaves us with much nicer looking code, a much more sensible
locking scheme, and an easy way of checking whether MST is enabled or
not for handling DP HPD IRQs.
v2:
* Get rid of accidental newlines
v4:
* Fix uninitialized usage of mstm in nv50_mstm_detect() - thanks kernel
bot!
Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200826182456.322681-9-lyude@redhat.com
2020-08-26 14:24:44 -04:00
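As a rough illustration of what this driver-side bookkeeping buys us, a DP
HPD/IRQ handler can pick the MST or SST path without ever taking
drm_dp_mst_topology_mgr.lock. The sketch below is illustrative only: the
example_dp_irq() name and the dp_lock mutex are assumptions, not the exact
nouveau code; mstm->is_mst and nv50_mstm_service() come from this file.

static void example_dp_irq(struct nouveau_drm *drm,
			   struct nouveau_connector *nv_connector,
			   struct nv50_mstm *mstm, struct mutex *dp_lock)
{
	bool mst_active;

	/* Driver-owned DP state lock, independent of the MST helpers. */
	mutex_lock(dp_lock);
	mst_active = mstm && mstm->is_mst;
	mutex_unlock(dp_lock);

	if (mst_active)
		nv50_mstm_service(drm, nv_connector, mstm);
	/* else: handle the IRQ as a plain SST event */
}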
|
|
|
bool
|
|
|
|
nv50_mstm_service(struct nouveau_drm *drm,
|
|
|
|
struct nouveau_connector *nv_connector,
|
|
|
|
struct nv50_mstm *mstm)
|
2016-11-04 17:20:36 +10:00
|
|
|
{
|
2020-08-26 14:24:44 -04:00
|
|
|
struct drm_dp_aux *aux = &nv_connector->aux;
|
|
|
|
bool handled = true, ret = true;
|
|
|
|
int rc;
|
2016-11-04 17:20:36 +10:00
|
|
|
u8 esi[8] = {};
|
|
|
|
|
|
|
|
while (handled) {
|
2020-08-26 14:24:44 -04:00
|
|
|
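		/* Read the sink count and ESI IRQ vectors so the MST helpers can work out what triggered this event. */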
rc = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
|
|
|
|
if (rc != 8) {
|
|
|
|
ret = false;
|
|
|
|
break;
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
|
|
|
|
if (!handled)
|
|
|
|
break;
|
|
|
|
|
2020-08-26 14:24:44 -04:00
|
|
|
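		/* Ack the IRQ vector bits we just serviced by writing them back to the sink. */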
rc = drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1],
|
|
|
|
3);
|
|
|
|
if (rc != 3) {
|
|
|
|
ret = false;
|
|
|
|
break;
|
|
|
|
}
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
2020-08-26 14:24:44 -04:00
|
|
|
|
|
|
|
if (!ret)
|
|
|
|
NV_DEBUG(drm, "Failed to handle ESI on %s: %d\n",
|
|
|
|
nv_connector->base.name, rc);
|
|
|
|
|
|
|
|
return ret;
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
nv50_mstm_remove(struct nv50_mstm *mstm)
|
|
|
|
{
|
2020-08-26 14:24:44 -04:00
|
|
|
mstm->is_mst = false;
|
|
|
|
drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
static int
|
2020-08-26 14:24:44 -04:00
|
|
|
nv50_mstm_enable(struct nv50_mstm *mstm, int state)
|
2016-11-04 17:20:36 +10:00
|
|
|
{
|
|
|
|
struct nouveau_encoder *outp = mstm->outp;
|
|
|
|
struct {
|
|
|
|
struct nv50_disp_mthd_v1 base;
|
|
|
|
struct nv50_disp_sor_dp_mst_link_v0 mst;
|
|
|
|
} args = {
|
|
|
|
.base.version = 1,
|
|
|
|
.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
|
|
|
|
.base.hasht = outp->dcb->hasht,
|
|
|
|
.base.hashm = outp->dcb->hashm,
|
|
|
|
.mst.state = state,
|
|
|
|
};
|
|
|
|
struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
|
2018-05-08 20:39:47 +10:00
|
|
|
struct nvif_object *disp = &drm->display->disp.object;
|
2016-11-04 17:20:36 +10:00
|
|
|
|
|
|
|
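	/* Ask nvkm to flag MST on/off for this SOR; this nvif method does not touch the DPCD itself. */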
return nvif_mthd(disp, 0, &args, sizeof(args));
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2020-08-26 14:24:44 -04:00
|
|
|
nv50_mstm_detect(struct nouveau_encoder *outp)
|
2016-11-04 17:20:36 +10:00
|
|
|
{
|
2020-08-26 14:24:44 -04:00
|
|
|
struct nv50_mstm *mstm = outp->dp.mstm;
|
drm/nouveau: Only write DP_MSTM_CTRL when needed
Currently, nouveau will re-write the DP_MSTM_CTRL register for an MST
hub every time it receives a long HPD pulse on DP. This isn't actually
necessary and additionally, has some unintended side effects.
With the P50 I've got here, rewriting DP_MSTM_CTRL constantly seems to
make it rather likely (1 out of 5 times usually) that bringing up MST
with its ThinkPad dock will fail and result in sideband messages timing
out in the middle. Afterwards, successive probes don't manage to get the
dock to communicate properly over MST sideband.
Many times sideband message timeouts from MST hubs are indicative of
either the source or the sink dropping an ESI event, which can cause
DRM's perspective of the topology's current state to go out of sync with
reality. While it's tough to really know for sure what's happening to
the dock, using userspace tools to write to DP_MSTM_CTRL in the middle
of the MST link probing process does appear to make things flaky. It's
possible that when we write to DP_MSTM_CTRL, the function that gets
triggered to respond in the dock's firmware temporarily puts it in a
state where it might end up not reporting an ESI to the source, or ends
up dropping a sideband message we sent it.
So, to fix this we make it so that when probing an MST topology, we
respect its current state. If the dock's already enabled, we simply
read DP_MSTM_CTRL and disable the topology if its value is not what we
expected. Otherwise, we perform the normal MST probing dance. We avoid
taking any action unless the state of the MST topology actually
changes.
This fixes MST sideband message timeouts and detection failures on my
P50 with its ThinkPad dock.
Signed-off-by: Lyude Paul <lyude@redhat.com>
Cc: stable@vger.kernel.org
Cc: Karol Herbst <karolherbst@gmail.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
2018-08-09 18:22:05 -04:00
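A minimal sketch of the "respect the hub's current state" check described
above. It is illustrative only: example_probe_mstm_ctrl() is a made-up
helper and the real probing flow in nouveau does more than this, but it
shows the read-before-write idea using the existing drm_dp_dpcd_readb()/
drm_dp_dpcd_writeb() helpers and the DP_MST_EN bit.

static int example_probe_mstm_ctrl(struct drm_dp_aux *aux, bool want_mst)
{
	u8 ctrl;
	int ret;

	ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &ctrl);
	if (ret < 0)
		return ret;

	/* Hub already in the expected state: leave DP_MSTM_CTRL alone. */
	if (!!(ctrl & DP_MST_EN) == want_mst)
		return 0;

	/* Only (re)program the register when the state actually changes. */
	ret = drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, want_mst ? DP_MST_EN : 0);
	return ret < 0 ? ret : 0;
}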
|
|
|
struct drm_dp_aux *aux;
|
|
|
|
int ret;
|
2016-11-04 17:20:36 +10:00
|
|
|
|
2020-08-26 14:24:44 -04:00
|
|
|
if (!mstm || !mstm->can_mst)
|
2016-11-04 17:20:36 +10:00
|
|
|
return 0;
|
|
|
|
|
2018-08-09 18:22:05 -04:00
|
|
|
aux = mstm->mgr.aux;
|
|
|
|
|
2020-08-26 14:24:44 -04:00
|
|
|
/* Clear any leftover MST state we didn't set ourselves by first
|
|
|
|
* disabling MST if it was already enabled
|
|
|
|
*/
|
|
|
|
ret = drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2016-11-04 17:20:36 +10:00
|
|
|
|
2020-08-26 14:24:44 -04:00
|
|
|
/* And start enabling */
|
|
|
|
ret = nv50_mstm_enable(mstm, true);
|
2016-11-04 17:20:36 +10:00
|
|
|
if (ret)
|
2020-08-26 14:24:44 -04:00
|
|
|
return ret;
|
2016-11-04 17:20:36 +10:00
|
|
|
|
2020-08-26 14:24:44 -04:00
|
|
|
ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, true);
|
|
|
|
if (ret) {
|
|
|
|
nv50_mstm_enable(mstm, false);
|
|
|
|
return ret;
|
|
|
|
}
|
drm/nouveau: Only write DP_MSTM_CTRL when needed
Currently, nouveau will re-write the DP_MSTM_CTRL register for an MST
hub every time it receives a long HPD pulse on DP. This isn't actually
necessary and additionally, has some unintended side effects.
With the P50 I've got here, rewriting DP_MSTM_CTRL constantly seems to
make it rather likely (about 1 out of 5 times) that bringing up MST with
its ThinkPad dock will fail and result in sideband messages timing out
in the middle. Afterwards, successive probes don't manage to get the
dock communicating properly over MST sideband.
Many times sideband message timeouts from MST hubs are indicative of
either the source or the sink dropping an ESI event, which can cause
DRM's perspective of the topology's current state to go out of sync with
reality. While it's tough to really know for sure what's happening to
the dock, using userspace tools to write to DP_MSTM_CTRL in the middle
of the MST link probing process does appear to make things flaky. It's
possible that when we write to DP_MSTM_CTRL, the function that gets
triggered to respond in the dock's firmware temporarily puts it in a
state where it might end up not reporting an ESI to the source, or ends
up dropping a sideband message we sent it.
So, to fix this we make it so that when probing an MST topology, we
respect its current state. If the dock is already enabled, we simply
read DP_MSTM_CTRL and only disable the topology if its value is not what
we expected. Otherwise, we perform the normal MST probing dance. We take
no action unless the state of the MST topology actually changes (a
read-before-write sketch is included after the MST probing function
below).
This fixes MST sideband message timeouts and detection failures on my
P50 with its ThinkPad dock.
Signed-off-by: Lyude Paul <lyude@redhat.com>
Cc: stable@vger.kernel.org
Cc: Karol Herbst <karolherbst@gmail.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
2018-08-09 18:22:05 -04:00
|
|
|
|
2020-08-26 14:24:44 -04:00
|
|
|
mstm->is_mst = true;
|
|
|
|
return 1;
|
2016-11-04 17:20:36 +10:00
|
|
|
}
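The following is a hedged sketch, not the actual nouveau handler (the
function name is made up for illustration): it shows the kind of check the
driver-side bookkeeping above enables, where the DP HPD IRQ path decides
between servicing MST ESIs and handling a plain SST IRQ by reading
mstm->is_mst under hpd_irq_lock, without ever taking
drm_dp_mst_topology_mgr.lock.

static bool
example_outp_hpd_irq_is_mst(struct nouveau_encoder *outp)
{
	struct nv50_mstm *mstm = outp->dp.mstm;
	bool is_mst;

	/* hpd_irq_lock, not mgr->lock, guards the driver-side MST state. */
	mutex_lock(&outp->dp.hpd_irq_lock);
	is_mst = mstm && mstm->is_mst && !mstm->suspended;
	mutex_unlock(&outp->dp.hpd_irq_lock);

	/* true: service ESIs through the MST topology manager;
	 * false: treat the IRQ as a regular SST link-status event.
	 */
	return is_mst;
}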
|
|
|
|
|
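As a hedged illustration of the "only write DP_MSTM_CTRL when needed"
approach described earlier (assumed helper name and parameters; this is not
the actual nouveau code), the probe path can read the sink's current value
first and skip the DPCD write entirely when nothing needs to change:

static int
example_mstm_ctrl_sync(struct drm_dp_aux *aux, u8 want)
{
	u8 cur;
	int ret;

	/* Read the sink's current MSTM_CTRL value before touching it. */
	ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &cur);
	if (ret < 0)
		return ret;

	/* Already in the desired state: leave the dock alone. */
	if (cur == want)
		return 0;

	ret = drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, want);
	return ret < 0 ? ret : 0;
}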
2016-11-04 17:20:36 +10:00
|
|
|
static void
|
2020-08-26 14:24:44 -04:00
|
|
|
nv50_mstm_fini(struct nouveau_encoder *outp)
|
2016-11-04 17:20:36 +10:00
|
|
|
{
|
2020-08-26 14:24:44 -04:00
|
|
|
struct nv50_mstm *mstm = outp->dp.mstm;
|
|
|
|
|
|
|
|
if (!mstm)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* Don't change the MST state of this connector until we've finished
|
|
|
|
* resuming, since we can't safely grab hpd_irq_lock in our resume
|
|
|
|
* path to protect mstm->is_mst without potentially deadlocking
|
|
|
|
*/
|
|
|
|
mutex_lock(&outp->dp.hpd_irq_lock);
|
|
|
|
mstm->suspended = true;
|
|
|
|
mutex_unlock(&outp->dp.hpd_irq_lock);
|
|
|
|
|
|
|
|
if (mstm->is_mst)
|
2016-11-04 17:20:36 +10:00
|
|
|
drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2020-08-26 14:24:44 -04:00
|
|
|
nv50_mstm_init(struct nouveau_encoder *outp, bool runtime)
|
2016-11-04 17:20:36 +10:00
|
|
|
{
|
2020-08-26 14:24:44 -04:00
|
|
|
struct nv50_mstm *mstm = outp->dp.mstm;
|
|
|
|
int ret = 0;
|
2018-11-14 20:39:51 -05:00
|
|
|
|
2020-08-26 14:24:44 -04:00
|
|
|
if (!mstm)
|
2018-11-14 20:39:51 -05:00
|
|
|
return;
|
|
|
|
|
2020-08-26 14:24:44 -04:00
|
|
|
if (mstm->is_mst) {
|
|
|
|
ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime);
|
|
|
|
if (ret == -1)
|
|
|
|
nv50_mstm_remove(mstm);
|
2018-11-14 20:39:51 -05:00
|
|
|
}
|
2020-08-26 14:24:44 -04:00
|
|
|
|
|
|
|
mutex_lock(&outp->dp.hpd_irq_lock);
|
|
|
|
mstm->suspended = false;
|
|
|
|
mutex_unlock(&outp->dp.hpd_irq_lock);
|
|
|
|
|
|
|
|
if (ret == -1)
|
|
|
|
drm_kms_helper_hotplug_event(mstm->mgr.dev);
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
static void
|
|
|
|
nv50_mstm_del(struct nv50_mstm **pmstm)
|
|
|
|
{
|
|
|
|
struct nv50_mstm *mstm = *pmstm;
|
|
|
|
if (mstm) {
|
2018-12-11 18:56:20 -05:00
|
|
|
drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
|
2016-11-04 17:20:36 +10:00
|
|
|
kfree(*pmstm);
|
|
|
|
*pmstm = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
|
|
|
|
int conn_base_id, struct nv50_mstm **pmstm)
|
|
|
|
{
|
|
|
|
const int max_payloads = hweight8(outp->dcb->heads);
|
|
|
|
struct drm_device *dev = outp->base.base.dev;
|
|
|
|
struct nv50_mstm *mstm;
|
drm/nouveau/kms/nv50-: Use less encoders by making mstos per-head
Currently, for every single MST capable DRM connector we create a set of
fake encoders, one for each possible head. Unfortunately this ends up
being a huge waste of encoders. While this currently isn't causing us
any problems, it's extremely close to doing so.
The ThinkPad P71 is a good example of this. Originally when trying to
figure out why nouveau was failing to load on this laptop, I discovered
it was because nouveau was creating too many encoders. This ended up
being because we were mistakenly creating MST encoders for the eDP port;
however, we are still extremely close to hitting the encoder limit on
this machine, as it exposes 1 eDP port and 5 DP ports, resulting in 31
encoders.
So while this fix didn't end up being necessary to fix the P71, we still
need to implement this so that we avoid hitting the encoder limit for
valid display configurations in the event that some machine with more
connectors than this becomes available. Plus, we don't want to let good
code go to waste :)
So, use fewer encoders by only creating one MSTO per head. Then, attach
each new MSTC to every MSTO that corresponds to a head its parent DP
port is capable of using (a hedged sketch of this attachment scheme
follows nv50_mstm_new() below). This brings the number of encoders we
register on the ThinkPad P71 from 31 down to just 15. Yay!
Signed-off-by: Lyude Paul <lyude@redhat.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
2019-09-13 18:03:52 -04:00
|
|
|
int ret;
|
2016-11-04 17:20:36 +10:00
|
|
|
|
|
|
|
if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
|
|
|
|
return -ENOMEM;
|
|
|
|
mstm->outp = outp;
|
2016-11-04 17:20:36 +10:00
|
|
|
mstm->mgr.cbs = &nv50_mstm;
|
2016-11-04 17:20:36 +10:00
|
|
|
|
2017-01-24 15:49:29 -08:00
|
|
|
ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
|
2016-11-04 17:20:36 +10:00
|
|
|
max_payloads, conn_base_id);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
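A hedged sketch of the msto-per-head attachment described in the "Use less
encoders" note above; the encoder array and head mask parameters here are
assumptions for illustration and do not match the real nv50 structures:

static int
example_mstc_attach_mstos(struct drm_connector *connector,
			  struct drm_encoder *msto[], int nr_heads,
			  u32 head_mask)
{
	int head, ret;

	/* Attach this MST connector to every per-head MSTO its parent
	 * DP port can drive, instead of creating private fake encoders
	 * for each connector/head combination.
	 */
	for (head = 0; head < nr_heads; head++) {
		if (!(head_mask & BIT(head)) || !msto[head])
			continue;

		ret = drm_connector_attach_encoder(connector, msto[head]);
		if (ret)
			return ret;
	}

	return 0;
}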
|
|
|
|
|
2011-07-04 16:25:18 +10:00
|
|
|
/******************************************************************************
|
|
|
|
* SOR
|
|
|
|
*****************************************************************************/
|
2012-03-12 15:23:44 +10:00
|
|
|
static void
|
2016-11-04 17:20:36 +10:00
|
|
|
nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
|
2018-05-08 20:39:47 +10:00
|
|
|
struct nv50_head_atom *asyh, u8 proto, u8 depth)
|
2012-03-12 15:23:44 +10:00
|
|
|
{
|
2018-05-08 20:39:47 +10:00
|
|
|
struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
|
2018-05-08 20:39:47 +10:00
|
|
|
struct nv50_core *core = disp->core;
|
2016-11-04 17:20:36 +10:00
|
|
|
|
2018-05-08 20:39:47 +10:00
|
|
|
if (!asyh) {
|
2016-11-04 17:20:36 +10:00
|
|
|
nv_encoder->ctrl &= ~BIT(head);
|
2020-06-20 18:09:59 +10:00
|
|
|
if (NVDEF_TEST(nv_encoder->ctrl, NV507D, SOR_SET_CONTROL, OWNER, ==, NONE))
|
2016-11-04 17:20:36 +10:00
|
|
|
nv_encoder->ctrl = 0;
|
|
|
|
} else {
|
2020-06-20 18:09:59 +10:00
|
|
|
nv_encoder->ctrl |= NVVAL(NV507D, SOR_SET_CONTROL, PROTOCOL, proto);
|
2016-11-04 17:20:36 +10:00
|
|
|
nv_encoder->ctrl |= BIT(head);
|
2018-05-08 20:39:47 +10:00
|
|
|
asyh->or.depth = depth;
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
2018-05-08 20:39:47 +10:00
|
|
|
core->func->sor->ctrl(core, nv_encoder->or, nv_encoder->ctrl, asyh);
|
2014-06-05 10:59:55 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2020-08-26 14:24:42 -04:00
|
|
|
nv50_sor_disable(struct drm_encoder *encoder,
|
|
|
|
struct drm_atomic_state *state)
|
2014-06-05 10:59:55 +10:00
|
|
|
{
|
|
|
|
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
|
|
|
|
struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
|
2020-08-26 14:24:43 -04:00
|
|
|
struct nouveau_connector *nv_connector =
|
|
|
|
nv50_outp_get_old_connector(nv_encoder, state);
|
2012-11-16 11:40:34 +10:00
|
|
|
|
|
|
|
nv_encoder->crtc = NULL;
|
2014-06-05 10:59:55 +10:00
|
|
|
|
|
|
|
if (nv_crtc) {
|
2020-08-26 14:24:43 -04:00
|
|
|
struct drm_dp_aux *aux = &nv_connector->aux;
|
2016-11-04 17:20:36 +10:00
|
|
|
u8 pwr;
|
|
|
|
|
2020-08-26 14:24:43 -04:00
|
|
|
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
|
|
|
|
int ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
|
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
if (ret == 0) {
|
|
|
|
pwr &= ~DP_SET_POWER_MASK;
|
|
|
|
pwr |= DP_SET_POWER_D3;
|
2020-08-26 14:24:43 -04:00
|
|
|
drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
|
2016-11-04 17:20:36 +10:00
|
|
|
nv50_audio_disable(encoder, nv_crtc);
|
|
|
|
nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
|
2017-05-19 23:59:35 +10:00
|
|
|
nv50_outp_release(nv_encoder);
|
2014-06-05 10:59:55 +10:00
|
|
|
}
|
2012-03-12 15:23:44 +10:00
|
|
|
}
|
|
|
|
|
2011-07-05 13:08:40 +10:00
|
|
|
static void
|
2020-11-13 19:14:10 -05:00
|
|
|
nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
|
2011-07-05 13:08:40 +10:00
|
|
|
{
|
2014-08-10 04:10:27 +10:00
|
|
|
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
|
|
|
|
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
|
2018-05-08 20:39:47 +10:00
|
|
|
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
|
|
|
|
struct drm_display_mode *mode = &asyh->state.adjusted_mode;
|
2014-08-10 04:10:27 +10:00
|
|
|
struct {
|
|
|
|
struct nv50_disp_mthd_v1 base;
|
|
|
|
struct nv50_disp_sor_lvds_script_v0 lvds;
|
|
|
|
} lvds = {
|
|
|
|
.base.version = 1,
|
|
|
|
.base.method = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
|
|
|
|
.base.hasht = nv_encoder->dcb->hasht,
|
|
|
|
.base.hashm = nv_encoder->dcb->hashm,
|
|
|
|
};
|
2012-11-21 14:40:21 +10:00
|
|
|
struct nv50_disp *disp = nv50_disp(encoder->dev);
|
2011-11-11 18:13:13 +10:00
|
|
|
struct drm_device *dev = encoder->dev;
|
2012-07-31 16:16:21 +10:00
|
|
|
struct nouveau_drm *drm = nouveau_drm(dev);
|
2011-07-08 12:52:14 +10:00
|
|
|
struct nouveau_connector *nv_connector;
|
2012-07-31 16:16:21 +10:00
|
|
|
struct nvbios *bios = &drm->vbios;
|
2020-06-03 11:37:56 +10:00
|
|
|
bool hda = false;
|
2020-06-20 18:09:59 +10:00
|
|
|
u8 proto = NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM;
|
|
|
|
u8 depth = NV837D_SOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT;
|
2011-07-05 13:08:40 +10:00
|
|
|
|
2020-08-26 14:24:42 -04:00
|
|
|
nv_connector = nv50_outp_get_new_connector(nv_encoder, state);
|
2014-06-05 10:59:55 +10:00
|
|
|
nv_encoder->crtc = encoder->crtc;
|
2020-06-03 11:37:56 +10:00
|
|
|
|
|
|
|
if ((disp->disp->object.oclass == GT214_DISP ||
|
|
|
|
disp->disp->object.oclass >= GF110_DISP) &&
|
|
|
|
drm_detect_monitor_audio(nv_connector->edid))
|
|
|
|
hda = true;
|
|
|
|
nv50_outp_acquire(nv_encoder, hda);
|
2014-06-05 10:59:55 +10:00
|
|
|
|
2011-07-08 12:52:14 +10:00
|
|
|
switch (nv_encoder->dcb->type) {
|
2012-07-11 10:44:20 +10:00
|
|
|
case DCB_OUTPUT_TMDS:
|
2017-05-19 23:59:35 +10:00
|
|
|
if (nv_encoder->link & 1) {
|
2020-06-20 18:09:59 +10:00
|
|
|
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_A;
|
2015-11-03 21:00:10 -05:00
|
|
|
/* Only enable dual-link if:
|
|
|
|
* - Need to (i.e. rate > 165MHz)
|
|
|
|
* - DCB says we can
|
|
|
|
* - Not an HDMI monitor, since there's no dual-link
|
|
|
|
* on HDMI.
|
|
|
|
*/
|
|
|
|
if (mode->clock >= 165000 &&
|
|
|
|
nv_encoder->dcb->duallink_possible &&
|
|
|
|
!drm_detect_hdmi_monitor(nv_connector->edid))
|
2020-06-20 18:09:59 +10:00
|
|
|
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_DUAL_TMDS;
|
2011-07-08 12:52:14 +10:00
|
|
|
} else {
|
2020-06-20 18:09:59 +10:00
|
|
|
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_SINGLE_TMDS_B;
|
2011-07-08 12:52:14 +10:00
|
|
|
}
|
|
|
|
|
2020-08-26 14:24:42 -04:00
|
|
|
nv50_hdmi_enable(&nv_encoder->base.base, state, mode);
|
2011-07-08 12:52:14 +10:00
|
|
|
break;
|
2012-07-11 10:44:20 +10:00
|
|
|
case DCB_OUTPUT_LVDS:
|
2020-06-20 18:09:59 +10:00
|
|
|
proto = NV507D_SOR_SET_CONTROL_PROTOCOL_LVDS_CUSTOM;
|
2012-11-16 11:40:34 +10:00
|
|
|
|
2011-07-08 12:52:14 +10:00
|
|
|
if (bios->fp_no_ddc) {
|
|
|
|
if (bios->fp.dual_link)
|
2014-08-10 04:10:27 +10:00
|
|
|
lvds.lvds.script |= 0x0100;
|
2011-07-08 12:52:14 +10:00
|
|
|
if (bios->fp.if_is_24bit)
|
2014-08-10 04:10:27 +10:00
|
|
|
lvds.lvds.script |= 0x0200;
|
2011-07-08 12:52:14 +10:00
|
|
|
} else {
|
2011-11-18 10:23:59 +10:00
|
|
|
if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
|
2011-07-08 12:52:14 +10:00
|
|
|
if (((u8 *)nv_connector->edid)[121] == 2)
|
2014-08-10 04:10:27 +10:00
|
|
|
lvds.lvds.script |= 0x0100;
|
2011-07-08 12:52:14 +10:00
|
|
|
} else
|
|
|
|
if (mode->clock >= bios->fp.duallink_transition_clk) {
|
2014-08-10 04:10:27 +10:00
|
|
|
lvds.lvds.script |= 0x0100;
|
2011-07-08 12:52:14 +10:00
|
|
|
}
|
2011-07-05 13:08:40 +10:00
|
|
|
|
2014-08-10 04:10:27 +10:00
|
|
|
if (lvds.lvds.script & 0x0100) {
|
2011-07-08 12:52:14 +10:00
|
|
|
if (bios->fp.strapless_is_24bit & 2)
|
2014-08-10 04:10:27 +10:00
|
|
|
lvds.lvds.script |= 0x0200;
|
2011-07-08 12:52:14 +10:00
|
|
|
} else {
|
|
|
|
if (bios->fp.strapless_is_24bit & 1)
|
2014-08-10 04:10:27 +10:00
|
|
|
lvds.lvds.script |= 0x0200;
|
2011-07-08 12:52:14 +10:00
|
|
|
}
|
|
|
|
|
2019-11-15 16:07:19 -05:00
|
|
|
if (asyh->or.bpc == 8)
|
2014-08-10 04:10:27 +10:00
|
|
|
lvds.lvds.script |= 0x0200;
|
2011-07-08 12:52:14 +10:00
|
|
|
}
|
2012-11-09 11:25:37 +10:00
|
|
|
|
2018-05-08 20:39:47 +10:00
|
|
|
nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds));
|
2011-07-08 12:52:14 +10:00
|
|
|
break;
|
2012-07-11 10:44:20 +10:00
|
|
|
case DCB_OUTPUT_DP:
|
2019-11-15 16:07:19 -05:00
|
|
|
depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
|
2012-03-11 01:28:48 +10:00
|
|
|
|
2017-05-19 23:59:35 +10:00
|
|
|
if (nv_encoder->link & 1)
|
2020-06-20 18:09:59 +10:00
|
|
|
proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_A;
|
2012-03-11 01:28:48 +10:00
|
|
|
else
|
2020-06-20 18:09:59 +10:00
|
|
|
proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B;
|
2016-11-04 17:20:36 +10:00
|
|
|
|
2020-08-26 14:24:42 -04:00
|
|
|
nv50_audio_enable(encoder, state, mode);
|
2012-03-11 01:28:48 +10:00
|
|
|
break;
|
2011-07-08 12:52:14 +10:00
|
|
|
default:
|
2016-03-03 12:56:33 +10:00
|
|
|
BUG();
|
2011-07-08 12:52:14 +10:00
|
|
|
break;
|
|
|
|
}
|
2011-07-08 11:53:37 +10:00
|
|
|
|
2018-05-08 20:39:47 +10:00
|
|
|
nv_encoder->update(nv_encoder, nv_crtc->index, asyh, proto, depth);
|
2011-07-05 13:08:40 +10:00
|
|
|
}
|
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
static const struct drm_encoder_helper_funcs
|
|
|
|
nv50_sor_help = {
|
2016-11-04 17:20:36 +10:00
|
|
|
.atomic_check = nv50_outp_atomic_check,
|
2020-08-26 14:24:42 -04:00
|
|
|
.atomic_enable = nv50_sor_enable,
|
|
|
|
.atomic_disable = nv50_sor_disable,
|
2016-11-04 17:20:36 +10:00
|
|
|
};
|
|
|
|
|
2011-07-05 13:08:40 +10:00
|
|
|
static void
|
2012-11-21 14:40:21 +10:00
|
|
|
nv50_sor_destroy(struct drm_encoder *encoder)
|
2011-07-05 13:08:40 +10:00
|
|
|
{
|
2016-11-04 17:20:36 +10:00
|
|
|
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
|
|
|
|
nv50_mstm_del(&nv_encoder->dp.mstm);
|
2011-07-05 13:08:40 +10:00
|
|
|
drm_encoder_cleanup(encoder);
|
2020-08-26 14:24:44 -04:00
|
|
|
|
|
|
|
if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
|
|
|
|
mutex_destroy(&nv_encoder->dp.hpd_irq_lock);
|
|
|
|
|
2011-07-05 13:08:40 +10:00
|
|
|
kfree(encoder);
|
|
|
|
}
|
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
static const struct drm_encoder_funcs
|
|
|
|
nv50_sor_func = {
|
2012-11-21 14:40:21 +10:00
|
|
|
.destroy = nv50_sor_destroy,
|
2011-07-05 13:08:40 +10:00
|
|
|
};
|
|
|
|
|
drm/nouveau/kms/nv50-: Use less encoders by making mstos per-head
Currently, for every single MST capable DRM connector we create a set of
fake encoders, one for each possible head. Unfortunately this ends up
being a huge waste of encoders. While this currently isn't causing us
any problems, it's extremely close to doing so.
The ThinkPad P71 is a good example of this. Originally when trying to
figure out why nouveau was failing to load on this laptop, I discovered
it was because nouveau was creating too many encoders. This ended up
being because we were mistakenly creating MST encoders for the eDP port,
however we are still extremely close to hitting the encoder limit on
this machine as it exposes 1 eDP port and 5 DP ports, resulting in 31
encoders.
So while this fix didn't end up being necessary to fix the P71, we still
need to implement this so that we avoid hitting the encoder limit for
valid display configurations in the event that some machine with more
connectors than this becomes available. Plus, we don't want to let good
code go to waste :)
So, use less encoders by only creating one MSTO per head. Then, attach
each new MSTC to each MSTO which corresponds to a head that its parent
DP port is capable of using. This brings the number of encoders we
register on the ThinkPad P71 from 31, down to just 15. Yay!
Signed-off-by: Lyude Paul <lyude@redhat.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
2019-09-13 18:03:52 -04:00
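A minimal userspace illustration of the per-head MSTO idea follows; fake_msto, attach_mstc and the 0x5 head mask are made up for the example, and it only shows how one connector would be attached to the encoders of the heads its parent port can drive:

#include <stdio.h>

#define MAX_HEADS 4

struct fake_msto { int head; };	/* one encoder per head, shared by all MSTCs */

static void attach_mstc(unsigned int heads_mask,
			const struct fake_msto msto[MAX_HEADS])
{
	for (int i = 0; i < MAX_HEADS; i++) {
		if (heads_mask & (1u << i))
			printf("attach connector to msto for head %d\n",
			       msto[i].head);
	}
}

int main(void)
{
	struct fake_msto msto[MAX_HEADS] = { {0}, {1}, {2}, {3} };

	/* parent DP port can reach heads 0 and 2 in this made-up example */
	attach_mstc(0x5, msto);
	return 0;
}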
|
|
|
static bool nv50_has_mst(struct nouveau_drm *drm)
|
|
|
|
{
|
|
|
|
struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
|
|
|
|
u32 data;
|
|
|
|
u8 ver, hdr, cnt, len;
|
|
|
|
|
|
|
|
data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len);
|
|
|
|
return data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04);
|
|
|
|
}
|
|
|
|
|
2011-07-05 13:08:40 +10:00
|
|
|
static int
|
2012-11-21 14:40:21 +10:00
|
|
|
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
|
2011-07-05 13:08:40 +10:00
|
|
|
{
|
2016-11-04 17:20:36 +10:00
|
|
|
struct nouveau_connector *nv_connector = nouveau_connector(connector);
|
2013-02-11 20:15:03 +10:00
|
|
|
struct nouveau_drm *drm = nouveau_drm(connector->dev);
|
2016-05-18 13:57:42 +10:00
|
|
|
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
|
2011-07-05 13:08:40 +10:00
|
|
|
struct nouveau_encoder *nv_encoder;
|
|
|
|
struct drm_encoder *encoder;
|
2020-05-11 18:41:24 -04:00
|
|
|
struct nv50_disp *disp = nv50_disp(connector->dev);
|
2016-11-04 17:20:36 +10:00
|
|
|
int type, ret;
|
2013-02-11 20:15:03 +10:00
|
|
|
|
|
|
|
switch (dcbe->type) {
|
|
|
|
case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
|
|
|
|
case DCB_OUTPUT_TMDS:
|
|
|
|
case DCB_OUTPUT_DP:
|
|
|
|
default:
|
|
|
|
type = DRM_MODE_ENCODER_TMDS;
|
|
|
|
break;
|
|
|
|
}
|
2011-07-05 13:08:40 +10:00
|
|
|
|
|
|
|
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
|
|
|
|
if (!nv_encoder)
|
|
|
|
return -ENOMEM;
|
|
|
|
nv_encoder->dcb = dcbe;
|
2016-11-04 17:20:36 +10:00
|
|
|
nv_encoder->update = nv50_sor_update;
|
2011-07-05 13:08:40 +10:00
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
encoder = to_drm_encoder(nv_encoder);
|
|
|
|
encoder->possible_crtcs = dcbe->heads;
|
|
|
|
encoder->possible_clones = 0;
|
2016-11-04 17:20:36 +10:00
|
|
|
drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
|
|
|
|
"sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
|
2016-11-04 17:20:36 +10:00
|
|
|
drm_encoder_helper_add(encoder, &nv50_sor_help);
|
2016-11-04 17:20:36 +10:00
|
|
|
|
2018-07-09 10:40:07 +02:00
|
|
|
drm_connector_attach_encoder(connector, encoder);
|
2016-11-04 17:20:36 +10:00
|
|
|
|
2020-05-11 18:41:24 -04:00
|
|
|
disp->core->func->sor->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
|
|
|
|
|
2015-08-20 14:54:15 +10:00
|
|
|
if (dcbe->type == DCB_OUTPUT_DP) {
|
|
|
|
struct nvkm_i2c_aux *aux =
|
|
|
|
nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
|
2020-05-11 18:41:24 -04:00
|
|
|
|
drm/nouveau/kms/nv50-: Refactor and cleanup DP HPD handling
First some backstory here: Currently, we keep track of whether or not
we've enabled MST or not by trying to piggy-back off the MST helpers.
This means that in order to check whether MST is enabled or not, we
actually need to grab drm_dp_mst_topology_mgr.lock.
Back when I originally wrote this, I did this piggy-backing with the
intention that I'd eventually be teaching our MST helpers how to recover
when an MST device has stopped responding, which in turn would require
the MST helpers having a way of disabling MST independently of the
driver. Note that this was before I reworked locking in the MST helpers,
so at the time we were sticking random things under &mgr->lock - which
grabbing this lock was meant to protect against.
This never came to fruition because doing such a reset safely turned out
to be a lot more painful and impossible than it sounds, and also just
risks us working around issues with our MST handlers that should be
properly fixed instead. Even if it did though, simply calling
drm_dp_mst_topology_mgr_set_mst() from the MST helpers (with the
exception of when we're tearing down our MST managers, that's always OK)
wouldn't have been a bad idea, since drivers like nouveau and i915 need
to do their own bookkeeping immediately after disabling MST.
So, implementing that would likely require adding a hook for
helper-triggered MST disables anyway.
So, fast forward to now - we want to start adding support for all of the
miscellaneous bits of the DP protocol (for both SST and MST) we're
missing before moving on to supporting more complicated features like
supporting different BPP values on MST, DSC, etc. Since many of these
features only exist on SST and make use of DP HPD IRQs, we want to be
able to atomically check whether we're servicing an MST IRQ or SST IRQ
in nouveau_connector_hotplug(). Currently we literally don't do this at
all, and just handle any kind of possible DP IRQ we could get including
ESIs - even if MST isn't actually enabled.
This would be very complicated and difficult to fix if we need to hold
&mgr->lock while handling SST IRQs to ensure that the MST topology
state doesn't change under us. What we really want here is to do our own
tracking of whether MST is enabled or not, similar to drivers like i915,
and define our own locking order to decomplicate things and avoid
hitting locking issues in the future.
So, let's do this by refactoring our MST probing/enabling code to use
our own MST bookkeeping, along with adding a lock for protecting DP
state that needs to be checked outside of our connector probing
functions. While we're at it, we also remove a bunch of unneeded steps
we perform when probing/enabling MST:
* Enabling bits in MSTM_CTRL before calling drm_dp_mst_topology_mgr_set_mst().
I don't think these ever actually did anything, since the nvif methods
for enabling MST don't actually do anything DPCD related and merely
indicate to nvkm that we've turned on MST.
* Checking the MSTM_CTRL bit is intact when checking the state of an
enabled MST topology in nv50_mstm_detect(). I just added this to be safe
originally, but now that we try reading the DPCD when probing DP
connectors it shouldn't be needed as that will abort our hotplug probing
if the device was removed well before we start checking for MST.
* All of the duplicate DPCD version checks.
This leaves us with much nicer looking code, a much more sensible
locking scheme, and an easy way of checking whether MST is enabled or
not for handling DP HPD IRQs.
v2:
* Get rid of accidental newlines
v4:
* Fix uninitialized usage of mstm in nv50_mstm_detect() - thanks kernel
bot!
Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200826182456.322681-9-lyude@redhat.com
2020-08-26 14:24:44 -04:00
|
|
|
mutex_init(&nv_encoder->dp.hpd_irq_lock);
|
|
|
|
|
2015-08-20 14:54:15 +10:00
|
|
|
if (aux) {
|
2018-05-08 20:39:47 +10:00
|
|
|
if (disp->disp->object.oclass < GF110_DISP) {
|
2017-07-19 16:49:59 +10:00
|
|
|
/* HW has no support for address-only
|
|
|
|
* transactions, so we're required to
|
|
|
|
* use custom I2C-over-AUX code.
|
|
|
|
*/
|
|
|
|
nv_encoder->i2c = &aux->i2c;
|
|
|
|
} else {
|
|
|
|
nv_encoder->i2c = &nv_connector->aux.ddc;
|
|
|
|
}
|
2015-08-20 14:54:15 +10:00
|
|
|
nv_encoder->aux = aux;
|
|
|
|
}
|
2016-11-04 17:20:36 +10:00
|
|
|
|
2019-09-13 18:03:50 -04:00
|
|
|
if (nv_connector->type != DCB_CONNECTOR_eDP &&
|
drm/nouveau/kms/nv50-: Use less encoders by making mstos per-head
Currently, for every single MST capable DRM connector we create a set of
fake encoders, one for each possible head. Unfortunately this ends up
being a huge waste of encoders. While this currently isn't causing us
any problems, it's extremely close to doing so.
The ThinkPad P71 is a good example of this. Originally when trying to
figure out why nouveau was failing to load on this laptop, I discovered
it was because nouveau was creating too many encoders. This ended up
being because we were mistakenly creating MST encoders for the eDP port,
however we are still extremely close to hitting the encoder limit on
this machine as it exposes 1 eDP port and 5 DP ports, resulting in 31
encoders.
So while this fix didn't end up being necessary to fix the P71, we still
need to implement this so that we avoid hitting the encoder limit for
valid display configurations in the event that some machine with more
connectors than this becomes available. Plus, we don't want to let good
code go to waste :)
So, use less encoders by only creating one MSTO per head. Then, attach
each new MSTC to each MSTO which corresponds to a head that its parent
DP port is capable of using. This brings the number of encoders we
register on the ThinkPad P71 from 31, down to just 15. Yay!
Signed-off-by: Lyude Paul <lyude@redhat.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
2019-09-13 18:03:52 -04:00
|
|
|
nv50_has_mst(drm)) {
|
|
|
|
ret = nv50_mstm_new(nv_encoder, &nv_connector->aux,
|
|
|
|
16, nv_connector->base.base.id,
|
2016-11-04 17:20:36 +10:00
|
|
|
&nv_encoder->dp.mstm);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
2015-08-20 14:54:15 +10:00
|
|
|
} else {
|
|
|
|
struct nvkm_i2c_bus *bus =
|
|
|
|
nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
|
|
|
|
if (bus)
|
|
|
|
nv_encoder->i2c = &bus->i2c;
|
|
|
|
}
|
|
|
|
|
2011-07-05 13:08:40 +10:00
|
|
|
return 0;
|
|
|
|
}
|
2011-07-04 16:25:18 +10:00
|
|
|
|
2013-02-11 09:52:58 +10:00
|
|
|
/******************************************************************************
|
|
|
|
* PIOR
|
|
|
|
*****************************************************************************/
|
2016-11-04 17:20:36 +10:00
|
|
|
static int
|
|
|
|
nv50_pior_atomic_check(struct drm_encoder *encoder,
|
|
|
|
struct drm_crtc_state *crtc_state,
|
|
|
|
struct drm_connector_state *conn_state)
|
2013-02-11 09:52:58 +10:00
|
|
|
{
|
2016-11-04 17:20:36 +10:00
|
|
|
int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
crtc_state->adjusted_mode.clock *= 2;
|
|
|
|
return 0;
|
2013-02-11 09:52:58 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2020-11-13 19:14:10 -05:00
|
|
|
nv50_pior_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
|
2013-02-11 09:52:58 +10:00
|
|
|
{
|
2016-11-04 17:20:36 +10:00
|
|
|
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
|
2018-05-08 20:39:47 +10:00
|
|
|
struct nv50_core *core = nv50_disp(encoder->dev)->core;
|
2020-06-20 18:09:59 +10:00
|
|
|
const u32 ctrl = NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, NONE);
|
2018-05-08 20:39:47 +10:00
|
|
|
if (nv_encoder->crtc)
|
2020-06-20 18:09:59 +10:00
|
|
|
core->func->pior->ctrl(core, nv_encoder->or, ctrl, NULL);
|
2016-11-04 17:20:36 +10:00
|
|
|
nv_encoder->crtc = NULL;
|
2017-05-19 23:59:35 +10:00
|
|
|
nv50_outp_release(nv_encoder);
|
2013-02-11 09:52:58 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2020-11-13 19:14:10 -05:00
|
|
|
nv50_pior_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
|
2013-02-11 09:52:58 +10:00
|
|
|
{
|
|
|
|
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
|
|
|
|
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
|
2018-05-08 20:39:47 +10:00
|
|
|
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
|
2018-05-08 20:39:47 +10:00
|
|
|
struct nv50_core *core = nv50_disp(encoder->dev)->core;
|
2020-06-20 18:09:59 +10:00
|
|
|
u32 ctrl = 0;
|
|
|
|
|
|
|
|
switch (nv_crtc->index) {
|
|
|
|
case 0: ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, HEAD0); break;
|
|
|
|
case 1: ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, OWNER, HEAD1); break;
|
|
|
|
default:
|
|
|
|
WARN_ON(1);
|
|
|
|
break;
|
|
|
|
}
|
2013-02-11 09:52:58 +10:00
|
|
|
|
2020-06-03 11:37:56 +10:00
|
|
|
nv50_outp_acquire(nv_encoder, false);
|
2017-05-19 23:59:35 +10:00
|
|
|
|
2019-11-15 16:07:19 -05:00
|
|
|
switch (asyh->or.bpc) {
|
2020-06-20 18:09:59 +10:00
|
|
|
case 10: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_30_444; break;
|
|
|
|
case 8: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_24_444; break;
|
|
|
|
case 6: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_BPP_18_444; break;
|
|
|
|
default: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT; break;
|
2013-02-11 09:52:58 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
switch (nv_encoder->dcb->type) {
|
|
|
|
case DCB_OUTPUT_TMDS:
|
|
|
|
case DCB_OUTPUT_DP:
|
2020-06-20 18:09:59 +10:00
|
|
|
ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, PROTOCOL, EXT_TMDS_ENC);
|
2013-02-11 09:52:58 +10:00
|
|
|
break;
|
|
|
|
default:
|
2016-03-03 12:56:33 +10:00
|
|
|
BUG();
|
2013-02-11 09:52:58 +10:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2020-06-20 18:09:59 +10:00
|
|
|
core->func->pior->ctrl(core, nv_encoder->or, ctrl, asyh);
|
2020-11-13 19:14:10 -05:00
|
|
|
nv_encoder->crtc = &nv_crtc->base;
|
2013-02-11 09:52:58 +10:00
|
|
|
}
|
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
static const struct drm_encoder_helper_funcs
|
|
|
|
nv50_pior_help = {
|
2016-11-04 17:20:36 +10:00
|
|
|
.atomic_check = nv50_pior_atomic_check,
|
2020-11-13 19:14:10 -05:00
|
|
|
.atomic_enable = nv50_pior_enable,
|
|
|
|
.atomic_disable = nv50_pior_disable,
|
2013-02-11 09:52:58 +10:00
|
|
|
};
|
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
static void
|
|
|
|
nv50_pior_destroy(struct drm_encoder *encoder)
|
|
|
|
{
|
|
|
|
drm_encoder_cleanup(encoder);
|
|
|
|
kfree(encoder);
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct drm_encoder_funcs
|
|
|
|
nv50_pior_func = {
|
2013-02-11 09:52:58 +10:00
|
|
|
.destroy = nv50_pior_destroy,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int
|
|
|
|
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
|
|
|
|
{
|
2020-05-11 18:41:24 -04:00
|
|
|
struct drm_device *dev = connector->dev;
|
|
|
|
struct nouveau_drm *drm = nouveau_drm(dev);
|
|
|
|
struct nv50_disp *disp = nv50_disp(dev);
|
2016-05-18 13:57:42 +10:00
|
|
|
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
|
2015-08-20 14:54:15 +10:00
|
|
|
struct nvkm_i2c_bus *bus = NULL;
|
|
|
|
struct nvkm_i2c_aux *aux = NULL;
|
|
|
|
struct i2c_adapter *ddc;
|
2013-02-11 09:52:58 +10:00
|
|
|
struct nouveau_encoder *nv_encoder;
|
|
|
|
struct drm_encoder *encoder;
|
|
|
|
int type;
|
|
|
|
|
|
|
|
switch (dcbe->type) {
|
|
|
|
case DCB_OUTPUT_TMDS:
|
2015-08-20 14:54:15 +10:00
|
|
|
bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
|
|
|
|
ddc = bus ? &bus->i2c : NULL;
|
2013-02-11 09:52:58 +10:00
|
|
|
type = DRM_MODE_ENCODER_TMDS;
|
|
|
|
break;
|
|
|
|
case DCB_OUTPUT_DP:
|
2015-08-20 14:54:15 +10:00
|
|
|
aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
|
2018-05-08 20:39:47 +10:00
|
|
|
ddc = aux ? &aux->i2c : NULL;
|
2013-02-11 09:52:58 +10:00
|
|
|
type = DRM_MODE_ENCODER_TMDS;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
|
|
|
|
if (!nv_encoder)
|
|
|
|
return -ENOMEM;
|
|
|
|
nv_encoder->dcb = dcbe;
|
|
|
|
nv_encoder->i2c = ddc;
|
2015-08-20 14:54:15 +10:00
|
|
|
nv_encoder->aux = aux;
|
2013-02-11 09:52:58 +10:00
|
|
|
|
|
|
|
encoder = to_drm_encoder(nv_encoder);
|
|
|
|
encoder->possible_crtcs = dcbe->heads;
|
|
|
|
encoder->possible_clones = 0;
|
2016-11-04 17:20:36 +10:00
|
|
|
drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
|
|
|
|
"pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
|
2016-11-04 17:20:36 +10:00
|
|
|
drm_encoder_helper_add(encoder, &nv50_pior_help);
|
2013-02-11 09:52:58 +10:00
|
|
|
|
2018-07-09 10:40:07 +02:00
|
|
|
drm_connector_attach_encoder(connector, encoder);
|
2020-05-11 18:41:24 -04:00
|
|
|
|
|
|
|
disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
|
|
|
|
|
2013-02-11 09:52:58 +10:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
/******************************************************************************
|
|
|
|
* Atomic
|
|
|
|
*****************************************************************************/
|
|
|
|
|
|
|
|
static void
|
2018-07-03 10:52:34 +10:00
|
|
|
nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
|
2016-11-04 17:20:36 +10:00
|
|
|
{
|
2018-07-03 10:52:34 +10:00
|
|
|
struct nouveau_drm *drm = nouveau_drm(state->dev);
|
2016-11-04 17:20:36 +10:00
|
|
|
struct nv50_disp *disp = nv50_disp(drm->dev);
|
2018-05-08 20:39:47 +10:00
|
|
|
struct nv50_core *core = disp->core;
|
2016-11-04 17:20:36 +10:00
|
|
|
struct nv50_mstm *mstm;
|
|
|
|
struct drm_encoder *encoder;
|
2016-11-04 17:20:36 +10:00
|
|
|
|
2018-05-08 20:39:47 +10:00
|
|
|
NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]);
|
2016-11-04 17:20:36 +10:00
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
drm_for_each_encoder(encoder, drm->dev) {
|
|
|
|
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
|
|
|
|
mstm = nouveau_encoder(encoder)->dp.mstm;
|
|
|
|
if (mstm && mstm->modified)
|
|
|
|
nv50_mstm_prepare(mstm);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-08 20:39:47 +10:00
|
|
|
core->func->ntfy_init(disp->sync, NV50_DISP_CORE_NTFY);
|
|
|
|
core->func->update(core, interlock, true);
|
|
|
|
if (core->func->ntfy_wait_done(disp->sync, NV50_DISP_CORE_NTFY,
|
|
|
|
disp->core->chan.base.device))
|
|
|
|
NV_ERROR(drm, "core notifier timeout\n");
|
2016-11-04 17:20:36 +10:00
|
|
|
|
|
|
|
drm_for_each_encoder(encoder, drm->dev) {
|
|
|
|
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
|
|
|
|
mstm = nouveau_encoder(encoder)->dp.mstm;
|
|
|
|
if (mstm && mstm->modified)
|
|
|
|
nv50_mstm_cleanup(mstm);
|
|
|
|
}
|
|
|
|
}
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
2018-07-03 10:52:34 +10:00
|
|
|
static void
|
|
|
|
nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
|
|
|
|
{
|
|
|
|
struct drm_plane_state *new_plane_state;
|
|
|
|
struct drm_plane *plane;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
|
|
|
|
struct nv50_wndw *wndw = nv50_wndw(plane);
|
|
|
|
if (interlock[wndw->interlock.type] & wndw->interlock.data) {
|
|
|
|
if (wndw->func->update)
|
|
|
|
wndw->func->update(wndw, interlock);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
static void
|
|
|
|
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
|
|
|
|
{
|
|
|
|
struct drm_device *dev = state->dev;
|
2017-08-15 10:52:50 +02:00
|
|
|
struct drm_crtc_state *new_crtc_state, *old_crtc_state;
|
2016-11-04 17:20:36 +10:00
|
|
|
struct drm_crtc *crtc;
|
2017-07-19 16:39:19 +02:00
|
|
|
struct drm_plane_state *new_plane_state;
|
2016-11-04 17:20:36 +10:00
|
|
|
struct drm_plane *plane;
|
|
|
|
struct nouveau_drm *drm = nouveau_drm(dev);
|
|
|
|
struct nv50_disp *disp = nv50_disp(dev);
|
|
|
|
struct nv50_atom *atom = nv50_atom(state);
|
2020-02-03 03:36:30 -05:00
|
|
|
struct nv50_core *core = disp->core;
|
2016-11-04 17:20:36 +10:00
|
|
|
struct nv50_outp_atom *outp, *outt;
|
2018-05-08 20:39:47 +10:00
|
|
|
u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {};
|
2016-11-04 17:20:36 +10:00
|
|
|
int i;
|
2020-06-29 18:36:25 -04:00
|
|
|
bool flushed = false;
|
2016-11-04 17:20:36 +10:00
|
|
|
|
|
|
|
NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
|
drm/nouveau/kms/nvd9-: Add CRC support
This introduces support for CRC readback on gf119+, using the
documentation generously provided to us by Nvidia:
https://github.com/NVIDIA/open-gpu-doc/blob/master/Display-CRC/display-crc.txt
We expose all available CRC sources. SF, SOR, PIOR, and DAC are exposed
through a single set of "outp" sources: outp-active/auto for a CRC of
the scanout region, outp-complete for a CRC of both the scanout and
blanking/sync region combined, and outp-inactive for a CRC of only the
blanking/sync region. For each source, nouveau selects the appropriate
tap point based on the output path in use. We also expose an "rg"
source, which allows for capturing CRCs of the scanout raster before
it's encoded into a video signal in the output path. This tap point is
referred to as the raster generator.
Note that while there's some other neat features that can be used with
CRC capture on nvidia hardware, like capturing from two CRC sources
simultaneously, I couldn't see any usecase for them and did not
implement them.
Nvidia only allows for accessing CRCs through a shared DMA region that
we program through the core EVO/NvDisplay channel which is referred to
as the notifier context. The notifier context is limited to either 255
(for Fermi-Pascal) or 2047 (Volta+) entries to store CRCs in, and
unfortunately the hardware simply drops CRCs and reports an overflow
once all available entries in the notifier context are filled.
Since the DRM CRC API and igt-gpu-tools don't expect there to be a limit
on how many CRCs can be captured, we work around this in nouveau by
allocating two separate notifier contexts for each head instead of one.
We schedule a vblank worker ahead of time so that once we start getting
close to filling up all of the available entries in the notifier
context, we can swap the currently used notifier context out with
another pre-prepared notifier context in a manner similar to page
flipping.
Unfortunately, the hardware only allows us to do this by flushing two
separate updates on the core channel: one to release the current
notifier context handle, and one to program the next notifier context's
handle. When the hardware processes the first update, the CRC for the
current frame is lost. However, the second update can be flushed
immediately without waiting for the first to complete so that CRC
generation resumes on the next frame. According to Nvidia's hardware
engineers, there isn't any cleaner way of flipping notifier contexts
that would avoid this.
Since using vblank workers to swap out the notifier context will ensure
we can usually flush both updates to hardware within the timespan of a
single frame, we can also ensure that there will only be exactly one
frame lost between the first and second update being executed by the
hardware. This gives us the guarantee that we're always correctly
matching each CRC entry with its respective frame even after a context
flip. And since IGT will retrieve the CRC entry for a frame by waiting
until it receives a CRC for any subsequent frames, this doesn't cause an
issue with any tests and is much simpler than trying to change the
current DRM API to accommodate.
In order to facilitate testing of correct handling of this limitation,
we also expose a debugfs interface to manually control the threshold for
when we start trying to flip the notifier context. We will use this in
igt to trigger a context flip for testing purposes without needing to
wait for the notifier to completely fill up. This threshold is reset
to the default value set by nouveau after each capture, and is exposed
in a separate folder within each CRTC's debugfs directory labelled
"nv_crc".
Changes since v1:
* Forgot to finish saving crc.h before saving, whoops. This just adds
some corrections to the empty function declarations that we use if
CONFIG_DEBUG_FS isn't enabled.
Changes since v2:
* Don't check return code from debugfs_create_dir() or
debugfs_create_file() - Greg K-H
Changes since v3:
(no functional changes)
* Fix SPDX license identifiers (checkpatch)
* s/uint32_t/u32/ (checkpatch)
* Fix indenting in switch cases (checkpatch)
Changes since v4:
* Remove unneeded param changes with nv50_head_flush_clr/set
* Rebase
Changes since v5:
* Remove set but unused variable (outp) in nv50_crc_atomic_check() -
Kbuild bot
Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Acked-by: Dave Airlie <airlied@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200627194657.156514-10-lyude@redhat.com
2019-10-07 14:20:12 -04:00
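The context-flip scheme can be sketched in plain C as below; fake_crc, fake_crc_frame and the threshold value are hypothetical, and the sketch only models the bookkeeping (two contexts per head, a flip once the active one nears capacity, exactly one CRC dropped per flip), not the actual EVO/NvDisplay channel programming:

#include <stdio.h>

#define CTX_ENTRIES 255		/* Fermi..Pascal sized notifier context */

struct fake_crc {
	int ctx_idx;		/* which of the two contexts is active */
	int entries[2];		/* entries consumed in each context */
	int flip_threshold;	/* tunable, like the debugfs knob */
};

static void fake_crc_frame(struct fake_crc *crc, int frame)
{
	if (crc->entries[crc->ctx_idx]++ >= crc->flip_threshold) {
		/* release current ctx, program the spare: one CRC is lost */
		crc->ctx_idx ^= 1;
		crc->entries[crc->ctx_idx] = 0;
		printf("frame %d: context flip, CRC dropped\n", frame);
		return;
	}
	printf("frame %d: CRC stored in ctx %d\n", frame, crc->ctx_idx);
}

int main(void)
{
	struct fake_crc crc = { .flip_threshold = CTX_ENTRIES - 30 };

	for (int frame = 0; frame < 300; frame++)
		fake_crc_frame(&crc, frame);
	return 0;
}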
|
|
|
nv50_crc_atomic_stop_reporting(state);
|
2016-11-04 17:20:36 +10:00
|
|
|
drm_atomic_helper_wait_for_fences(dev, state, false);
|
|
|
|
drm_atomic_helper_wait_for_dependencies(state);
|
|
|
|
drm_atomic_helper_update_legacy_modeset_state(dev, state);
|
2020-09-07 15:00:25 +03:00
|
|
|
drm_atomic_helper_calc_timestamping_constants(state);
|
2016-11-04 17:20:36 +10:00
|
|
|
|
|
|
|
if (atom->lock_core)
|
|
|
|
mutex_lock(&disp->mutex);
|
|
|
|
|
|
|
|
/* Disable head(s). */
|
2017-08-15 10:52:50 +02:00
|
|
|
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
|
2017-07-19 16:39:19 +02:00
|
|
|
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
|
2016-11-04 17:20:36 +10:00
|
|
|
struct nv50_head *head = nv50_head(crtc);
|
|
|
|
|
|
|
|
NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
|
|
|
|
asyh->clr.mask, asyh->set.mask);
|
2019-08-07 19:47:06 -04:00
|
|
|
|
|
|
|
if (old_crtc_state->active && !new_crtc_state->active) {
|
|
|
|
pm_runtime_put_noidle(dev->dev);
|
2017-07-24 11:01:52 +10:00
|
|
|
drm_crtc_vblank_off(crtc);
|
2019-08-07 19:47:06 -04:00
|
|
|
}
|
2016-11-04 17:20:36 +10:00
|
|
|
|
|
|
|
if (asyh->clr.mask) {
|
|
|
|
nv50_head_flush_clr(head, asyh, atom->flush_disable);
|
2018-05-08 20:39:47 +10:00
|
|
|
interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Disable plane(s). */
|
2017-07-19 16:39:19 +02:00
|
|
|
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
|
|
|
|
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
|
2016-11-04 17:20:36 +10:00
|
|
|
struct nv50_wndw *wndw = nv50_wndw(plane);
|
|
|
|
|
|
|
|
NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
|
|
|
|
asyw->clr.mask, asyw->set.mask);
|
|
|
|
if (!asyw->clr.mask)
|
|
|
|
continue;
|
|
|
|
|
2018-05-08 20:39:47 +10:00
|
|
|
nv50_wndw_flush_clr(wndw, interlock, atom->flush_disable, asyw);
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Disable output path(s). */
|
|
|
|
list_for_each_entry(outp, &atom->outp, head) {
|
|
|
|
const struct drm_encoder_helper_funcs *help;
|
|
|
|
struct drm_encoder *encoder;
|
|
|
|
|
|
|
|
encoder = outp->encoder;
|
|
|
|
help = encoder->helper_private;
|
|
|
|
|
|
|
|
NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
|
|
|
|
outp->clr.mask, outp->set.mask);
|
|
|
|
|
|
|
|
if (outp->clr.mask) {
|
2020-08-26 14:24:42 -04:00
|
|
|
help->atomic_disable(encoder, state);
|
2018-05-08 20:39:47 +10:00
|
|
|
interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
|
2016-11-04 17:20:36 +10:00
|
|
|
if (outp->flush_disable) {
|
2018-07-03 10:52:34 +10:00
|
|
|
nv50_disp_atomic_commit_wndw(state, interlock);
|
|
|
|
nv50_disp_atomic_commit_core(state, interlock);
|
2018-05-08 20:39:47 +10:00
|
|
|
memset(interlock, 0x00, sizeof(interlock));
|
2020-06-29 18:36:25 -04:00
|
|
|
|
|
|
|
flushed = true;
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Flush disable. */
|
2018-05-08 20:39:47 +10:00
|
|
|
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
|
2016-11-04 17:20:36 +10:00
|
|
|
if (atom->flush_disable) {
|
2018-07-03 10:52:34 +10:00
|
|
|
nv50_disp_atomic_commit_wndw(state, interlock);
|
|
|
|
nv50_disp_atomic_commit_core(state, interlock);
|
2018-05-08 20:39:47 +10:00
|
|
|
memset(interlock, 0x00, sizeof(interlock));
|
2020-06-29 18:36:25 -04:00
|
|
|
|
|
|
|
flushed = true;
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-29 18:36:25 -04:00
|
|
|
if (flushed)
|
|
|
|
nv50_crc_atomic_release_notifier_contexts(state);
|
|
|
|
nv50_crc_atomic_init_notifier_contexts(state);
|
drm/nouveau/kms/nvd9-: Add CRC support
This introduces support for CRC readback on gf119+, using the
documentation generously provided to us by Nvidia:
https://github.com/NVIDIA/open-gpu-doc/blob/master/Display-CRC/display-crc.txt
We expose all available CRC sources. SF, SOR, PIOR, and DAC are exposed
through a single set of "outp" sources: outp-active/auto for a CRC of
the scanout region, outp-complete for a CRC of both the scanout and
blanking/sync region combined, and outp-inactive for a CRC of only the
blanking/sync region. For each source, nouveau selects the appropriate
tap point based on the output path in use. We also expose an "rg"
source, which allows for capturing CRCs of the scanout raster before
it's encoded into a video signal in the output path. This tap point is
referred to as the raster generator.
Note that while there's some other neat features that can be used with
CRC capture on nvidia hardware, like capturing from two CRC sources
simultaneously, I couldn't see any usecase for them and did not
implement them.
Nvidia only allows for accessing CRCs through a shared DMA region that
we program through the core EVO/NvDisplay channel which is referred to
as the notifier context. The notifier context is limited to either 255
(for Fermi-Pascal) or 2047 (Volta+) entries to store CRCs in, and
unfortunately the hardware simply drops CRCs and reports an overflow
once all available entries in the notifier context are filled.
Since the DRM CRC API and igt-gpu-tools don't expect there to be a limit
on how many CRCs can be captured, we work around this in nouveau by
allocating two separate notifier contexts for each head instead of one.
We schedule a vblank worker ahead of time so that once we start getting
close to filling up all of the available entries in the notifier
context, we can swap the currently used notifier context out with
another pre-prepared notifier context in a manner similar to page
flipping.
Unfortunately, the hardware only allows us to do this by flushing two
separate updates on the core channel: one to release the current
notifier context handle, and one to program the next notifier context's
handle. When the hardware processes the first update, the CRC for the
current frame is lost. However, the second update can be flushed
immediately without waiting for the first to complete so that CRC
generation resumes on the next frame. According to Nvidia's hardware
engineers, there isn't any cleaner way of flipping notifier contexts
that would avoid this.
Since using vblank workers to swap out the notifier context will ensure
we can usually flush both updates to hardware within the timespan of a
single frame, we can also ensure that there will only be exactly one
frame lost between the first and second update being executed by the
hardware. This gives us the guarantee that we're always correctly
matching each CRC entry with its respective frame even after a context
flip. And since IGT will retrieve the CRC entry for a frame by waiting
until it receives a CRC for any subsequent frames, this doesn't cause an
issue with any tests and is much simpler than trying to change the
current DRM API to accommodate.
In order to facilitate testing of correct handling of this limitation,
we also expose a debugfs interface to manually control the threshold for
when we start trying to flip the notifier context. We will use this in
igt to trigger a context flip for testing purposes without needing to
wait for the notifier to completely fill up. This threshold is reset
to the default value set by nouveau after each capture, and is exposed
in a separate folder within each CRTC's debugfs directory labelled
"nv_crc".
Changes since v1:
* Forgot to finish saving crc.h before saving, whoops. This just adds
some corrections to the empty function declarations that we use if
CONFIG_DEBUG_FS isn't enabled.
Changes since v2:
* Don't check return code from debugfs_create_dir() or
debugfs_create_file() - Greg K-H
Changes since v3:
(no functional changes)
* Fix SPDX license identifiers (checkpatch)
* s/uint32_t/u32/ (checkpatch)
* Fix indenting in switch cases (checkpatch)
Changes since v4:
* Remove unneeded param changes with nv50_head_flush_clr/set
* Rebase
Changes since v5:
* Remove set but unused variable (outp) in nv50_crc_atomic_check() -
Kbuild bot
Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Acked-by: Dave Airlie <airlied@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200627194657.156514-10-lyude@redhat.com
2019-10-07 14:20:12 -04:00
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
/* Update output path(s). */
|
|
|
|
list_for_each_entry_safe(outp, outt, &atom->outp, head) {
|
|
|
|
const struct drm_encoder_helper_funcs *help;
|
|
|
|
struct drm_encoder *encoder;
|
|
|
|
|
|
|
|
encoder = outp->encoder;
|
|
|
|
help = encoder->helper_private;
|
|
|
|
|
|
|
|
NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
|
|
|
|
outp->set.mask, outp->clr.mask);
|
|
|
|
|
|
|
|
if (outp->set.mask) {
|
2020-08-26 14:24:42 -04:00
|
|
|
help->atomic_enable(encoder, state);
|
2018-05-08 20:39:47 +10:00
|
|
|
interlock[NV50_DISP_INTERLOCK_CORE] = 1;
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
list_del(&outp->head);
|
|
|
|
kfree(outp);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Update head(s). */
|
2017-08-15 10:52:50 +02:00
|
|
|
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
|
2017-07-19 16:39:19 +02:00
|
|
|
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
|
2016-11-04 17:20:36 +10:00
|
|
|
struct nv50_head *head = nv50_head(crtc);
|
|
|
|
|
|
|
|
NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
|
|
|
|
asyh->set.mask, asyh->clr.mask);
|
|
|
|
|
|
|
|
if (asyh->set.mask) {
|
|
|
|
nv50_head_flush_set(head, asyh);
|
2018-05-08 20:39:47 +10:00
|
|
|
interlock[NV50_DISP_INTERLOCK_CORE] = 1;
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
2017-08-15 10:52:50 +02:00
|
|
|
if (new_crtc_state->active) {
|
2019-08-07 19:47:06 -04:00
|
|
|
if (!old_crtc_state->active) {
|
2017-07-24 11:01:52 +10:00
|
|
|
drm_crtc_vblank_on(crtc);
|
2019-08-07 19:47:06 -04:00
|
|
|
pm_runtime_get_noresume(dev->dev);
|
|
|
|
}
|
2017-08-15 10:52:50 +02:00
|
|
|
if (new_crtc_state->event)
|
2017-07-24 11:01:52 +10:00
|
|
|
drm_crtc_vblank_get(crtc);
|
|
|
|
}
|
2017-01-24 09:32:26 +10:00
|
|
|
}
|
|
|
|
|
2020-02-03 03:36:30 -05:00
|
|
|
/* Update window->head assignment.
|
|
|
|
*
|
|
|
|
* This has to happen in an update that's not interlocked with
|
|
|
|
* any window channels to avoid hitting HW error checks.
|
|
|
|
*
|
|
|
|
* TODO: Proper handling of window ownership (Turing apparently
|
|
|
|
* supports non-fixed mappings).
|
|
|
|
*/
|
|
|
|
if (core->assign_windows) {
|
|
|
|
core->func->wndw.owner(core);
|
2020-07-23 20:10:42 +10:00
|
|
|
nv50_disp_atomic_commit_core(state, interlock);
|
2020-02-03 03:36:30 -05:00
|
|
|
core->assign_windows = false;
|
|
|
|
interlock[NV50_DISP_INTERLOCK_CORE] = 0;
|
|
|
|
}
|
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
/* Update plane(s). */
|
2017-07-19 16:39:19 +02:00
|
|
|
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
|
|
|
|
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
|
2016-11-04 17:20:36 +10:00
|
|
|
struct nv50_wndw *wndw = nv50_wndw(plane);
|
|
|
|
|
|
|
|
NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
|
|
|
|
asyw->set.mask, asyw->clr.mask);
|
|
|
|
if ( !asyw->set.mask &&
|
|
|
|
(!asyw->clr.mask || atom->flush_disable))
|
|
|
|
continue;
|
|
|
|
|
2018-05-08 20:39:47 +10:00
|
|
|
nv50_wndw_flush_set(wndw, interlock, asyw);
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Flush update. */
|
2018-07-03 10:52:34 +10:00
|
|
|
nv50_disp_atomic_commit_wndw(state, interlock);
|
2018-05-08 20:39:47 +10:00
|
|
|
|
2018-05-08 20:39:47 +10:00
|
|
|
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
|
|
|
|
if (interlock[NV50_DISP_INTERLOCK_BASE] ||
|
2018-07-03 10:52:34 +10:00
|
|
|
interlock[NV50_DISP_INTERLOCK_OVLY] ||
|
|
|
|
interlock[NV50_DISP_INTERLOCK_WNDW] ||
|
2018-05-08 20:39:47 +10:00
|
|
|
!atom->state.legacy_cursor_update)
|
2018-07-03 10:52:34 +10:00
|
|
|
nv50_disp_atomic_commit_core(state, interlock);
|
2018-05-08 20:39:47 +10:00
|
|
|
else
|
2018-05-08 20:39:47 +10:00
|
|
|
disp->core->func->update(disp->core, interlock, false);
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
if (atom->lock_core)
|
|
|
|
mutex_unlock(&disp->mutex);
|
|
|
|
|
|
|
|
/* Wait for HW to signal completion. */
|
2017-07-19 16:39:19 +02:00
|
|
|
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
|
|
|
|
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
|
2016-11-04 17:20:36 +10:00
|
|
|
struct nv50_wndw *wndw = nv50_wndw(plane);
|
|
|
|
int ret = nv50_wndw_wait_armed(wndw, asyw);
|
|
|
|
if (ret)
|
|
|
|
NV_ERROR(drm, "%s: timeout\n", plane->name);
|
|
|
|
}
|
|
|
|
|
2017-07-19 16:39:19 +02:00
|
|
|
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
|
|
|
|
if (new_crtc_state->event) {
|
2016-11-04 17:20:36 +10:00
|
|
|
unsigned long flags;
|
2016-11-23 07:58:54 +01:00
|
|
|
/* Get correct count/ts if racing with vblank irq */
|
2017-08-15 10:52:50 +02:00
|
|
|
if (new_crtc_state->active)
|
2017-08-15 16:16:58 +10:00
|
|
|
drm_crtc_accurate_vblank_count(crtc);
|
2016-11-04 17:20:36 +10:00
|
|
|
spin_lock_irqsave(&crtc->dev->event_lock, flags);
|
2017-07-19 16:39:19 +02:00
|
|
|
drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
|
2016-11-04 17:20:36 +10:00
|
|
|
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
|
2017-08-15 10:52:50 +02:00
|
|
|
|
2017-07-19 16:39:19 +02:00
|
|
|
new_crtc_state->event = NULL;
|
2017-08-15 10:52:50 +02:00
|
|
|
if (new_crtc_state->active)
|
2017-07-24 11:01:52 +10:00
|
|
|
drm_crtc_vblank_put(crtc);
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
drm/nouveau/kms/nvd9-: Add CRC support
This introduces support for CRC readback on gf119+, using the
documentation generously provided to us by Nvidia:
https://github.com/NVIDIA/open-gpu-doc/blob/master/Display-CRC/display-crc.txt
We expose all available CRC sources. SF, SOR, PIOR, and DAC are exposed
through a single set of "outp" sources: outp-active/auto for a CRC of
the scanout region, outp-complete for a CRC of both the scanout and
blanking/sync region combined, and outp-inactive for a CRC of only the
blanking/sync region. For each source, nouveau selects the appropriate
tap point based on the output path in use. We also expose an "rg"
source, which allows for capturing CRCs of the scanout raster before
it's encoded into a video signal in the output path. This tap point is
referred to as the raster generator.
Note that while there's some other neat features that can be used with
CRC capture on nvidia hardware, like capturing from two CRC sources
simultaneously, I couldn't see any usecase for them and did not
implement them.
Nvidia only allows for accessing CRCs through a shared DMA region that
we program through the core EVO/NvDisplay channel which is referred to
as the notifier context. The notifier context is limited to either 255
(for Fermi-Pascal) or 2047 (Volta+) entries to store CRCs in, and
unfortunately the hardware simply drops CRCs and reports an overflow
once all available entries in the notifier context are filled.
Since the DRM CRC API and igt-gpu-tools don't expect there to be a limit
on how many CRCs can be captured, we work around this in nouveau by
allocating two separate notifier contexts for each head instead of one.
We schedule a vblank worker ahead of time so that once we start getting
close to filling up all of the available entries in the notifier
context, we can swap the currently used notifier context out with
another pre-prepared notifier context in a manner similar to page
flipping.
Unfortunately, the hardware only allows us to do this by flushing two
separate updates on the core channel: one to release the current
notifier context handle, and one to program the next notifier context's
handle. When the hardware processes the first update, the CRC for the
current frame is lost. However, the second update can be flushed
immediately without waiting for the first to complete so that CRC
generation resumes on the next frame. According to Nvidia's hardware
engineers, there isn't any cleaner way of flipping notifier contexts
that would avoid this.
Since using vblank workers to swap out the notifier context will ensure
we can usually flush both updates to hardware within the timespan of a
single frame, we can also ensure that there will only be exactly one
frame lost between the first and second update being executed by the
hardware. This gives us the guarantee that we're always correctly
matching each CRC entry with its respective frame even after a context
flip. And since IGT will retrieve the CRC entry for a frame by waiting
until it receives a CRC for any subsequent frames, this doesn't cause an
issue with any tests and is much simpler than trying to change the
current DRM API to accommodate.
In order to facilitate testing of correct handling of this limitation,
we also expose a debugfs interface to manually control the threshold for
when we start trying to flip the notifier context. We will use this in
igt to trigger a context flip for testing purposes without needing to
wait for the notifier to completely fill up. This threshold is reset
to the default value set by nouveau after each capture, and is exposed
in a separate folder within each CRTC's debugfs directory labelled
"nv_crc".
Changes since v1:
* Forgot to finish saving crc.h before saving, whoops. This just adds
some corrections to the empty function declarations that we use if
CONFIG_DEBUG_FS isn't enabled.
Changes since v2:
* Don't check return code from debugfs_create_dir() or
debugfs_create_file() - Greg K-H
Changes since v3:
(no functional changes)
* Fix SPDX license identifiers (checkpatch)
* s/uint32_t/u32/ (checkpatch)
* Fix indenting in switch cases (checkpatch)
Changes since v4:
* Remove unneeded param changes with nv50_head_flush_clr/set
* Rebase
Changes since v5:
* Remove set but unused variable (outp) in nv50_crc_atomic_check() -
Kbuild bot
Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Acked-by: Dave Airlie <airlied@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200627194657.156514-10-lyude@redhat.com
2019-10-07 14:20:12 -04:00
|
|
|
nv50_crc_atomic_start_reporting(state);
|
2020-06-29 18:36:25 -04:00
|
|
|
if (!flushed)
|
|
|
|
nv50_crc_atomic_release_notifier_contexts(state);
|
2016-11-04 17:20:36 +10:00
|
|
|
drm_atomic_helper_commit_hw_done(state);
|
|
|
|
drm_atomic_helper_cleanup_planes(dev, state);
|
|
|
|
drm_atomic_helper_commit_cleanup_done(state);
|
|
|
|
drm_atomic_state_put(state);
|
2019-08-07 19:47:06 -04:00
|
|
|
|
|
|
|
/* Drop the RPM ref we got from nv50_disp_atomic_commit() */
|
|
|
|
pm_runtime_mark_last_busy(dev->dev);
|
|
|
|
pm_runtime_put_autosuspend(dev->dev);
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nv50_disp_atomic_commit_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct drm_atomic_state *state =
|
|
|
|
container_of(work, typeof(*state), commit_work);
|
|
|
|
nv50_disp_atomic_commit_tail(state);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nv50_disp_atomic_commit(struct drm_device *dev,
|
|
|
|
struct drm_atomic_state *state, bool nonblock)
|
|
|
|
{
|
2017-11-01 09:12:25 +10:00
|
|
|
struct drm_plane_state *new_plane_state;
|
2016-11-04 17:20:36 +10:00
|
|
|
struct drm_plane *plane;
|
|
|
|
int ret, i;
|
|
|
|
|
|
|
|
ret = pm_runtime_get_sync(dev->dev);
|
2020-06-13 20:29:18 -05:00
|
|
|
if (ret < 0 && ret != -EACCES) {
|
|
|
|
pm_runtime_put_autosuspend(dev->dev);
|
2016-11-04 17:20:36 +10:00
|
|
|
return ret;
|
2020-06-13 20:29:18 -05:00
|
|
|
}
|
2016-11-04 17:20:36 +10:00
|
|
|
|
|
|
|
ret = drm_atomic_helper_setup_commit(state, nonblock);
|
|
|
|
if (ret)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);
|
|
|
|
|
|
|
|
ret = drm_atomic_helper_prepare_planes(dev, state);
|
|
|
|
if (ret)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
if (!nonblock) {
|
|
|
|
ret = drm_atomic_helper_wait_for_fences(dev, state, true);
|
|
|
|
if (ret)
|
2017-07-11 16:33:03 +02:00
|
|
|
goto err_cleanup;
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
2017-07-11 16:33:05 +02:00
|
|
|
ret = drm_atomic_helper_swap_state(state, true);
|
|
|
|
if (ret)
|
|
|
|
goto err_cleanup;
|
|
|
|
|
2017-11-01 09:12:25 +10:00
|
|
|
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
|
|
|
|
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
|
2016-11-04 17:20:36 +10:00
|
|
|
struct nv50_wndw *wndw = nv50_wndw(plane);
|
2017-07-19 16:39:19 +02:00
|
|
|
|
2018-05-08 20:39:47 +10:00
|
|
|
if (asyw->set.image)
|
|
|
|
nv50_wndw_ntfy_enable(wndw, asyw);
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
drm_atomic_state_get(state);
|
|
|
|
|
2019-08-07 19:47:06 -04:00
|
|
|
/*
|
|
|
|
* Grab another RPM ref for the commit tail, which will release the
|
|
|
|
* ref when it's finished
|
|
|
|
*/
|
|
|
|
pm_runtime_get_noresume(dev->dev);
|
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
if (nonblock)
|
|
|
|
queue_work(system_unbound_wq, &state->commit_work);
|
|
|
|
else
|
|
|
|
nv50_disp_atomic_commit_tail(state);
|
|
|
|
|
2017-07-11 16:33:03 +02:00
|
|
|
err_cleanup:
|
|
|
|
if (ret)
|
|
|
|
drm_atomic_helper_cleanup_planes(dev, state);
|
2016-11-04 17:20:36 +10:00
|
|
|
done:
|
|
|
|
pm_runtime_put_autosuspend(dev->dev);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct nv50_outp_atom *
|
|
|
|
nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
|
|
|
|
{
|
|
|
|
struct nv50_outp_atom *outp;
|
|
|
|
|
|
|
|
list_for_each_entry(outp, &atom->outp, head) {
|
|
|
|
if (outp->encoder == encoder)
|
|
|
|
return outp;
|
|
|
|
}
|
|
|
|
|
|
|
|
outp = kzalloc(sizeof(*outp), GFP_KERNEL);
|
|
|
|
if (!outp)
|
|
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
|
|
|
|
list_add(&outp->head, &atom->outp);
|
|
|
|
outp->encoder = encoder;
|
|
|
|
return outp;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
|
2017-07-19 16:39:19 +02:00
|
|
|
struct drm_connector_state *old_connector_state)
|
2016-11-04 17:20:36 +10:00
|
|
|
{
|
2017-07-19 16:39:19 +02:00
|
|
|
struct drm_encoder *encoder = old_connector_state->best_encoder;
|
|
|
|
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
|
2016-11-04 17:20:36 +10:00
|
|
|
struct drm_crtc *crtc;
|
|
|
|
struct nv50_outp_atom *outp;
|
|
|
|
|
2017-07-19 16:39:19 +02:00
|
|
|
if (!(crtc = old_connector_state->crtc))
|
2016-11-04 17:20:36 +10:00
|
|
|
return 0;
|
|
|
|
|
2017-07-19 16:39:19 +02:00
|
|
|
old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
|
|
|
|
new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
|
|
|
|
if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
|
2016-11-04 17:20:36 +10:00
|
|
|
outp = nv50_disp_outp_atomic_add(atom, encoder);
|
|
|
|
if (IS_ERR(outp))
|
|
|
|
return PTR_ERR(outp);
|
|
|
|
|
|
|
|
if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
|
|
|
|
outp->flush_disable = true;
|
|
|
|
atom->flush_disable = true;
|
|
|
|
}
|
|
|
|
outp->clr.ctrl = true;
|
|
|
|
atom->lock_core = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
|
|
|
|
struct drm_connector_state *connector_state)
|
|
|
|
{
|
|
|
|
struct drm_encoder *encoder = connector_state->best_encoder;
|
2017-07-19 16:39:19 +02:00
|
|
|
struct drm_crtc_state *new_crtc_state;
|
2016-11-04 17:20:36 +10:00
|
|
|
struct drm_crtc *crtc;
|
|
|
|
struct nv50_outp_atom *outp;
|
|
|
|
|
|
|
|
if (!(crtc = connector_state->crtc))
|
|
|
|
return 0;
|
|
|
|
|
2017-07-19 16:39:19 +02:00
|
|
|
new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
|
|
|
|
if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
|
2016-11-04 17:20:36 +10:00
|
|
|
outp = nv50_disp_outp_atomic_add(atom, encoder);
|
|
|
|
if (IS_ERR(outp))
|
|
|
|
return PTR_ERR(outp);
|
|
|
|
|
|
|
|
outp->set.ctrl = true;
|
|
|
|
atom->lock_core = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
|
|
|
|
{
|
|
|
|
struct nv50_atom *atom = nv50_atom(state);
|
drm/nouveau/kms/nv140-: Track wndw mappings in nv50_head_atom
While we're not quite ready yet to add support for flexible wndw
mappings, we are going to need to at least keep track of the static wndw
mappings we're currently using in each head's atomic state. We'll likely
use this in the future to implement real flexible window mapping, but
the primary reason we'll need this is for CRC support.
See: on nvidia hardware, each CRC entry in the CRC notifier dma context
has a "tag". This tag corresponds to the nth update on a specific
EVO/NvDisplay channel, which itself is referred to as the "controlling
channel". For gf119+ this can be the core channel, ovly channel, or base
channel. Since we don't expose CRC entry tags to userspace, we simply
ignore this feature and always use the core channel as the controlling
channel. Simple.
Things get a little bit more complicated on gv100+ though. GV100+ only
lets us set the controlling channel to a specific wndw channel, and that
wndw must be owned by the head that we're grabbing CRCs from when we enable
CRC generation. Thus, we always need to make sure that each atomic head
state has at least one wndw that is mapped to the head, which will be
used as the controlling channel.
Note that since we don't have flexible wndw mappings yet, we don't
expect to run into any scenarios yet where we'd have a head with no
mapped wndws. When we do add support for flexible wndw mappings however,
we'll need to make sure that we handle reprogramming CRC capture if our
controlling wndw is moved to another head (and potentially reject the
new head state entirely if we can't find another available wndw to
replace it).
With that being said, nouveau currently tracks wndw visibility on heads.
It does not keep track of the actual ownership mappings, which are
(currently) statically programmed. To fix this, we introduce another
bitmask into nv50_head_atom.wndw to keep track of ownership separately
from visibility. We then introduce a nv50_head callback to handle
populating the wndw ownership map, and call it during the atomic check
phase when core->assign_windows is set to true.
Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Acked-by: Dave Airlie <airlied@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200627194657.156514-7-lyude@redhat.com
2020-02-06 14:37:36 -05:00
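A tiny standalone sketch of the ownership-versus-visibility split is below; fake_head_state and pick_controlling_wndw are hypothetical names and the bitmask values are made up, but it shows why the owned mask (not the visible mask) is what gv100+ CRC capture has to consult when choosing a controlling window:

#include <stdio.h>

struct fake_head_state {
	unsigned int wndw_visible;	/* windows currently shown on the head */
	unsigned int wndw_owned;	/* windows statically mapped to the head */
};

static int pick_controlling_wndw(const struct fake_head_state *asyh)
{
	/* any owned window will do; visibility is irrelevant here */
	for (int i = 0; i < 32; i++) {
		if (asyh->wndw_owned & (1u << i))
			return i;
	}
	return -1;	/* no mapped window: the state would have to be rejected */
}

int main(void)
{
	struct fake_head_state asyh = {
		.wndw_visible = 0x0,	/* nothing enabled yet */
		.wndw_owned = 0x3,	/* windows 0 and 1 mapped to this head */
	};

	printf("controlling wndw: %d\n", pick_controlling_wndw(&asyh));
	return 0;
}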
|
|
|
struct nv50_core *core = nv50_disp(dev)->core;
|
2017-07-19 16:39:19 +02:00
|
|
|
struct drm_connector_state *old_connector_state, *new_connector_state;
|
2016-11-04 17:20:36 +10:00
|
|
|
struct drm_connector *connector;
|
2018-05-08 20:39:47 +10:00
|
|
|
struct drm_crtc_state *new_crtc_state;
|
|
|
|
struct drm_crtc *crtc;
|
drm/nouveau/kms/nv140-: Track wndw mappings in nv50_head_atom
While we're not quite ready yet to add support for flexible wndw
mappings, we are going to need to at least keep track of the static wndw
mappings we're currently using in each head's atomic state. We'll likely
use this in the future to implement real flexible window mapping, but
the primary reason we'll need this is for CRC support.
See: on nvidia hardware, each CRC entry in the CRC notifier dma context
has a "tag". This tag corresponds to the nth update on a specific
EVO/NvDisplay channel, which itself is referred to as the "controlling
channel". For gf119+ this can be the core channel, ovly channel, or base
channel. Since we don't expose CRC entry tags to userspace, we simply
ignore this feature and always use the core channel as the controlling
channel. Simple.
Things get a little bit more complicated on gv100+ though. GV100+ only
lets us set the controlling channel to a specific wndw channel, and that
wndw must be owned by the head that we're grabbing CRCs from when we enable
CRC generation. Thus, we always need to make sure that each atomic head
state has at least one wndw that is mapped to the head, which will be
used as the controlling channel.
Note that since we don't have flexible wndw mappings yet, we don't
expect to run into any scenarios yet where we'd have a head with no
mapped wndws. When we do add support for flexible wndw mappings however,
we'll need to make sure that we handle reprogramming CRC capture if our
controlling wndw is moved to another head (and potentially reject the
new head state entirely if we can't find another available wndw to
replace it).
With that being said, nouveau currently tracks wndw visibility on heads.
It does not keep track of the actual ownership mappings, which are
(currently) statically programmed. To fix this, we introduce another
bitmask into nv50_head_atom.wndw to keep track of ownership separately
from visibility. We then introduce a nv50_head callback to handle
populating the wndw ownership map, and call it during the atomic check
phase when core->assign_windows is set to true.
Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Acked-by: Dave Airlie <airlied@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200627194657.156514-7-lyude@redhat.com
2020-02-06 14:37:36 -05:00
|
|
|
struct nv50_head *head;
|
|
|
|
struct nv50_head_atom *asyh;
|
2016-11-04 17:20:36 +10:00
|
|
|
int ret, i;
|
|
|
|
|
drm/nouveau/kms/nv140-: Track wndw mappings in nv50_head_atom
While we're not quite ready yet to add support for flexible wndw
mappings, we are going to need to at least keep track of the static wndw
mappings we're currently using in each head's atomic state. We'll likely
use this in the future to implement real flexible window mapping, but
the primary reason we'll need this is for CRC support.
See: on nvidia hardware, each CRC entry in the CRC notifier dma context
has a "tag". This tag corresponds to the nth update on a specific
EVO/NvDisplay channel, which itself is referred to as the "controlling
channel". For gf119+ this can be the core channel, ovly channel, or base
channel. Since we don't expose CRC entry tags to userspace, we simply
ignore this feature and always use the core channel as the controlling
channel. Simple.
Things get a little bit more complicated on gv100+ though. GV100+ only
lets us set the controlling channel to a specific wndw channel, and that
wndw must be owned by the head that we're grabbing CRCs from when we enable
CRC generation. Thus, we always need to make sure that each atomic head
state has at least one wndw that is mapped to the head, which will be
used as the controlling channel.
Note that since we don't have flexible wndw mappings yet, we don't
expect to run into any scenarios yet where we'd have a head with no
mapped wndws. When we do add support for flexible wndw mappings however,
we'll need to make sure that we handle reprogramming CRC capture if our
controlling wndw is moved to another head (and potentially reject the
new head state entirely if we can't find another available wndw to
replace it).
With that being said, nouveau currently tracks wndw visibility on heads.
It does not keep track of the actual ownership mappings, which are
(currently) statically programmed. To fix this, we introduce another
bitmask into nv50_head_atom.wndw to keep track of ownership separately
from visibility. We then introduce a nv50_head callback to handle
populating the wndw ownership map, and call it during the atomic check
phase when core->assign_windows is set to true.
Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Acked-by: Dave Airlie <airlied@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200627194657.156514-7-lyude@redhat.com
2020-02-06 14:37:36 -05:00
|
|
|
if (core->assign_windows && core->func->head->static_wndw_map) {
|
|
|
|
drm_for_each_crtc(crtc, dev) {
|
|
|
|
new_crtc_state = drm_atomic_get_crtc_state(state,
|
|
|
|
crtc);
|
|
|
|
if (IS_ERR(new_crtc_state))
|
|
|
|
return PTR_ERR(new_crtc_state);
|
|
|
|
|
|
|
|
head = nv50_head(crtc);
|
|
|
|
asyh = nv50_head_atom(new_crtc_state);
|
|
|
|
core->func->head->static_wndw_map(head, asyh);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-08 20:39:47 +10:00
|
|
|
/* We need to handle colour management on a per-plane basis. */
|
|
|
|
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
|
|
|
|
if (new_crtc_state->color_mgmt_changed) {
|
|
|
|
ret = drm_atomic_add_affected_planes(state, crtc);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
ret = drm_atomic_helper_check(dev, state);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2017-07-19 16:39:19 +02:00
|
|
|
for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
|
|
|
|
ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
|
2016-11-04 17:20:36 +10:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2017-07-19 16:39:19 +02:00
|
|
|
ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
|
2016-11-04 17:20:36 +10:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2019-01-10 19:53:43 -05:00
|
|
|
ret = drm_dp_mst_atomic_check(state);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2020-06-29 18:36:25 -04:00
|
|
|
nv50_crc_atomic_check_outp(atom);
|
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
|
|
|
|
{
|
|
|
|
struct nv50_atom *atom = nv50_atom(state);
|
|
|
|
struct nv50_outp_atom *outp, *outt;
|
|
|
|
|
|
|
|
list_for_each_entry_safe(outp, outt, &atom->outp, head) {
|
|
|
|
list_del(&outp->head);
|
|
|
|
kfree(outp);
|
|
|
|
}
|
|
|
|
|
|
|
|
drm_atomic_state_default_clear(state);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nv50_disp_atomic_state_free(struct drm_atomic_state *state)
|
|
|
|
{
|
|
|
|
struct nv50_atom *atom = nv50_atom(state);
|
|
|
|
drm_atomic_state_default_release(&atom->state);
|
|
|
|
kfree(atom);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct drm_atomic_state *
|
|
|
|
nv50_disp_atomic_state_alloc(struct drm_device *dev)
|
|
|
|
{
|
|
|
|
struct nv50_atom *atom;
|
|
|
|
if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
|
|
|
|
drm_atomic_state_init(dev, &atom->state) < 0) {
|
|
|
|
kfree(atom);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
INIT_LIST_HEAD(&atom->outp);
|
|
|
|
return &atom->state;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct drm_mode_config_funcs
|
|
|
|
nv50_disp_func = {
|
|
|
|
.fb_create = nouveau_user_framebuffer_create,
|
drm/nouveau/drm/nouveau: Fix deadlock with fb_helper with async RPM requests
Currently, nouveau uses the generic drm_fb_helper_output_poll_changed()
function provided by DRM as it's output_poll_changed callback.
Unfortunately however, this function doesn't grab runtime PM references
early enough and even if it did-we can't block waiting for the device to
resume in output_poll_changed() since it's very likely that we'll need
to grab the fb_helper lock at some point during the runtime resume
process. This currently results in deadlocking like so:
[ 246.669625] INFO: task kworker/4:0:37 blocked for more than 120 seconds.
[ 246.673398] Not tainted 4.18.0-rc5Lyude-Test+ #2
[ 246.675271] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
[ 246.676527] kworker/4:0 D 0 37 2 0x80000000
[ 246.677580] Workqueue: events output_poll_execute [drm_kms_helper]
[ 246.678704] Call Trace:
[ 246.679753] __schedule+0x322/0xaf0
[ 246.680916] schedule+0x33/0x90
[ 246.681924] schedule_preempt_disabled+0x15/0x20
[ 246.683023] __mutex_lock+0x569/0x9a0
[ 246.684035] ? kobject_uevent_env+0x117/0x7b0
[ 246.685132] ? drm_fb_helper_hotplug_event.part.28+0x20/0xb0 [drm_kms_helper]
[ 246.686179] mutex_lock_nested+0x1b/0x20
[ 246.687278] ? mutex_lock_nested+0x1b/0x20
[ 246.688307] drm_fb_helper_hotplug_event.part.28+0x20/0xb0 [drm_kms_helper]
[ 246.689420] drm_fb_helper_output_poll_changed+0x23/0x30 [drm_kms_helper]
[ 246.690462] drm_kms_helper_hotplug_event+0x2a/0x30 [drm_kms_helper]
[ 246.691570] output_poll_execute+0x198/0x1c0 [drm_kms_helper]
[ 246.692611] process_one_work+0x231/0x620
[ 246.693725] worker_thread+0x214/0x3a0
[ 246.694756] kthread+0x12b/0x150
[ 246.695856] ? wq_pool_ids_show+0x140/0x140
[ 246.696888] ? kthread_create_worker_on_cpu+0x70/0x70
[ 246.697998] ret_from_fork+0x3a/0x50
[ 246.699034] INFO: task kworker/0:1:60 blocked for more than 120 seconds.
[ 246.700153] Not tainted 4.18.0-rc5Lyude-Test+ #2
[ 246.701182] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
[ 246.702278] kworker/0:1 D 0 60 2 0x80000000
[ 246.703293] Workqueue: pm pm_runtime_work
[ 246.704393] Call Trace:
[ 246.705403] __schedule+0x322/0xaf0
[ 246.706439] ? wait_for_completion+0x104/0x190
[ 246.707393] schedule+0x33/0x90
[ 246.708375] schedule_timeout+0x3a5/0x590
[ 246.709289] ? mark_held_locks+0x58/0x80
[ 246.710208] ? _raw_spin_unlock_irq+0x2c/0x40
[ 246.711222] ? wait_for_completion+0x104/0x190
[ 246.712134] ? trace_hardirqs_on_caller+0xf4/0x190
[ 246.713094] ? wait_for_completion+0x104/0x190
[ 246.713964] wait_for_completion+0x12c/0x190
[ 246.714895] ? wake_up_q+0x80/0x80
[ 246.715727] ? get_work_pool+0x90/0x90
[ 246.716649] flush_work+0x1c9/0x280
[ 246.717483] ? flush_workqueue_prep_pwqs+0x1b0/0x1b0
[ 246.718442] __cancel_work_timer+0x146/0x1d0
[ 246.719247] cancel_delayed_work_sync+0x13/0x20
[ 246.720043] drm_kms_helper_poll_disable+0x1f/0x30 [drm_kms_helper]
[ 246.721123] nouveau_pmops_runtime_suspend+0x3d/0xb0 [nouveau]
[ 246.721897] pci_pm_runtime_suspend+0x6b/0x190
[ 246.722825] ? pci_has_legacy_pm_support+0x70/0x70
[ 246.723737] __rpm_callback+0x7a/0x1d0
[ 246.724721] ? pci_has_legacy_pm_support+0x70/0x70
[ 246.725607] rpm_callback+0x24/0x80
[ 246.726553] ? pci_has_legacy_pm_support+0x70/0x70
[ 246.727376] rpm_suspend+0x142/0x6b0
[ 246.728185] pm_runtime_work+0x97/0xc0
[ 246.728938] process_one_work+0x231/0x620
[ 246.729796] worker_thread+0x44/0x3a0
[ 246.730614] kthread+0x12b/0x150
[ 246.731395] ? wq_pool_ids_show+0x140/0x140
[ 246.732202] ? kthread_create_worker_on_cpu+0x70/0x70
[ 246.732878] ret_from_fork+0x3a/0x50
[ 246.733768] INFO: task kworker/4:2:422 blocked for more than 120 seconds.
[ 246.734587] Not tainted 4.18.0-rc5Lyude-Test+ #2
[ 246.735393] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
[ 246.736113] kworker/4:2 D 0 422 2 0x80000080
[ 246.736789] Workqueue: events_long drm_dp_mst_link_probe_work [drm_kms_helper]
[ 246.737665] Call Trace:
[ 246.738490] __schedule+0x322/0xaf0
[ 246.739250] schedule+0x33/0x90
[ 246.739908] rpm_resume+0x19c/0x850
[ 246.740750] ? finish_wait+0x90/0x90
[ 246.741541] __pm_runtime_resume+0x4e/0x90
[ 246.742370] nv50_disp_atomic_commit+0x31/0x210 [nouveau]
[ 246.743124] drm_atomic_commit+0x4a/0x50 [drm]
[ 246.743775] restore_fbdev_mode_atomic+0x1c8/0x240 [drm_kms_helper]
[ 246.744603] restore_fbdev_mode+0x31/0x140 [drm_kms_helper]
[ 246.745373] drm_fb_helper_restore_fbdev_mode_unlocked+0x54/0xb0 [drm_kms_helper]
[ 246.746220] drm_fb_helper_set_par+0x2d/0x50 [drm_kms_helper]
[ 246.746884] drm_fb_helper_hotplug_event.part.28+0x96/0xb0 [drm_kms_helper]
[ 246.747675] drm_fb_helper_output_poll_changed+0x23/0x30 [drm_kms_helper]
[ 246.748544] drm_kms_helper_hotplug_event+0x2a/0x30 [drm_kms_helper]
[ 246.749439] nv50_mstm_hotplug+0x15/0x20 [nouveau]
[ 246.750111] drm_dp_send_link_address+0x177/0x1c0 [drm_kms_helper]
[ 246.750764] drm_dp_check_and_send_link_address+0xa8/0xd0 [drm_kms_helper]
[ 246.751602] drm_dp_mst_link_probe_work+0x51/0x90 [drm_kms_helper]
[ 246.752314] process_one_work+0x231/0x620
[ 246.752979] worker_thread+0x44/0x3a0
[ 246.753838] kthread+0x12b/0x150
[ 246.754619] ? wq_pool_ids_show+0x140/0x140
[ 246.755386] ? kthread_create_worker_on_cpu+0x70/0x70
[ 246.756162] ret_from_fork+0x3a/0x50
[ 246.756847]
Showing all locks held in the system:
[ 246.758261] 3 locks held by kworker/4:0/37:
[ 246.759016] #0: 00000000f8df4d2d ((wq_completion)"events"){+.+.}, at: process_one_work+0x1b3/0x620
[ 246.759856] #1: 00000000e6065461 ((work_completion)(&(&dev->mode_config.output_poll_work)->work)){+.+.}, at: process_one_work+0x1b3/0x620
[ 246.760670] #2: 00000000cb66735f (&helper->lock){+.+.}, at: drm_fb_helper_hotplug_event.part.28+0x20/0xb0 [drm_kms_helper]
[ 246.761516] 2 locks held by kworker/0:1/60:
[ 246.762274] #0: 00000000fff6be0f ((wq_completion)"pm"){+.+.}, at: process_one_work+0x1b3/0x620
[ 246.762982] #1: 000000005ab44fb4 ((work_completion)(&dev->power.work)){+.+.}, at: process_one_work+0x1b3/0x620
[ 246.763890] 1 lock held by khungtaskd/64:
[ 246.764664] #0: 000000008cb8b5c3 (rcu_read_lock){....}, at: debug_show_all_locks+0x23/0x185
[ 246.765588] 5 locks held by kworker/4:2/422:
[ 246.766440] #0: 00000000232f0959 ((wq_completion)"events_long"){+.+.}, at: process_one_work+0x1b3/0x620
[ 246.767390] #1: 00000000bb59b134 ((work_completion)(&mgr->work)){+.+.}, at: process_one_work+0x1b3/0x620
[ 246.768154] #2: 00000000cb66735f (&helper->lock){+.+.}, at: drm_fb_helper_restore_fbdev_mode_unlocked+0x4c/0xb0 [drm_kms_helper]
[ 246.768966] #3: 000000004c8f0b6b (crtc_ww_class_acquire){+.+.}, at: restore_fbdev_mode_atomic+0x4b/0x240 [drm_kms_helper]
[ 246.769921] #4: 000000004c34a296 (crtc_ww_class_mutex){+.+.}, at: drm_modeset_backoff+0x8a/0x1b0 [drm]
[ 246.770839] 1 lock held by dmesg/1038:
[ 246.771739] 2 locks held by zsh/1172:
[ 246.772650] #0: 00000000836d0438 (&tty->ldisc_sem){++++}, at: ldsem_down_read+0x37/0x40
[ 246.773680] #1: 000000001f4f4d48 (&ldata->atomic_read_lock){+.+.}, at: n_tty_read+0xc1/0x870
[ 246.775522] =============================================
After trying dozens of different solutions, I found one very simple one
that should also have the benefit of preventing us from having to fight
locking for the rest of our lives. So, we work around these deadlocks by
deferring all fbcon hotplug events that happen after the runtime suspend
process starts until after the device is resumed again.
Changes since v7:
- Fixup commit message - Daniel Vetter
Changes since v6:
- Remove unused nouveau_fbcon_hotplugged_in_suspend() - Ilia
Changes since v5:
- Come up with the (hopefully final) solution for solving this dumb
problem, one that is a lot less likely to cause issues with locking in
the future. This should work around all deadlock conditions with fbcon
brought up thus far.
Changes since v4:
- Add nouveau_fbcon_hotplugged_in_suspend() to workaround deadlock
condition that Lukas described
- Just move all of this out of drm_fb_helper. It seems that other DRM
drivers have already figured out other workarounds for this. If other
drivers do end up needing this in the future, we can just move this
back into drm_fb_helper again.
Changes since v3:
- Actually check if fb_helper is NULL in both new helpers
- Actually check drm_fbdev_emulation in both new helpers
- Don't fire off a fb_helper hotplug unconditionally; only do it if
the following conditions are true (as otherwise, calling this in the
wrong spot will cause Bad Things to happen):
- fb_helper hotplug handling was actually inhibited previously
- fb_helper actually has a delayed hotplug pending
- fb_helper is actually bound
- fb_helper is actually initialized
- Add __must_check to drm_fb_helper_suspend_hotplug(). There's no
situation where a driver would actually want to use this without
checking the return value, so enforce that
- Rewrite and clarify the documentation for both helpers.
- Make sure to return true in the drm_fb_helper_suspend_hotplug() stub
that's provided in drm_fb_helper.h when CONFIG_DRM_FBDEV_EMULATION
isn't enabled
- Actually grab the toplevel fb_helper lock in
drm_fb_helper_resume_hotplug(), since it's possible other activity
(such as a hotplug) could be going on at the same time the driver
calls drm_fb_helper_resume_hotplug(). We need this to check whether or
not drm_fb_helper_hotplug_event() needs to be called anyway
Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Karol Herbst <kherbst@redhat.com>
Acked-by: Daniel Vetter <daniel@ffwll.ch>
Cc: stable@vger.kernel.org
Cc: Lukas Wunner <lukas@wunner.de>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
2018-08-15 15:00:13 -04:00
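A rough sketch of the deferral pattern described above. The structure and function names here are hypothetical and only illustrate the idea of recording a hotplug event while runtime suspend is in progress and replaying it once the device has resumed (assuming <drm/drm_fb_helper.h> for drm_fb_helper_hotplug_event()):

struct example_fbcon {
	struct drm_fb_helper helper;
	struct mutex hotplug_lock;	/* protects the two flags below */
	bool hotplug_suspended;		/* set while runtime suspend is in progress */
	bool hotplug_waiting;		/* a hotplug arrived while suspended */
};

static void example_fbcon_output_poll_changed(struct example_fbcon *fbcon)
{
	mutex_lock(&fbcon->hotplug_lock);
	if (fbcon->hotplug_suspended)
		fbcon->hotplug_waiting = true;		/* defer until resume */
	else
		drm_fb_helper_hotplug_event(&fbcon->helper);
	mutex_unlock(&fbcon->hotplug_lock);
}

static void example_fbcon_hotplug_resume(struct example_fbcon *fbcon)
{
	mutex_lock(&fbcon->hotplug_lock);
	fbcon->hotplug_suspended = false;
	if (fbcon->hotplug_waiting) {
		fbcon->hotplug_waiting = false;
		drm_fb_helper_hotplug_event(&fbcon->helper);	/* replay deferred event */
	}
	mutex_unlock(&fbcon->hotplug_lock);
}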
|
|
|
.output_poll_changed = nouveau_fbcon_output_poll_changed,
|
2016-11-04 17:20:36 +10:00
|
|
|
.atomic_check = nv50_disp_atomic_check,
|
|
|
|
.atomic_commit = nv50_disp_atomic_commit,
|
|
|
|
.atomic_state_alloc = nv50_disp_atomic_state_alloc,
|
|
|
|
.atomic_state_clear = nv50_disp_atomic_state_clear,
|
|
|
|
.atomic_state_free = nv50_disp_atomic_state_free,
|
|
|
|
};
|
|
|
|
|
2011-07-04 16:25:18 +10:00
|
|
|
/******************************************************************************
|
|
|
|
* Init
|
|
|
|
*****************************************************************************/
|
2014-08-10 04:10:19 +10:00
|
|
|
|
2019-02-12 22:28:13 +10:00
|
|
|
static void
|
drm/nouveau/kms/nv50-: Refactor and cleanup DP HPD handling
First some backstory here: Currently, we keep track of whether or not
we've enabled MST or not by trying to piggy-back off the MST helpers.
This means that in order to check whether MST is enabled or not, we
actually need to grab drm_dp_mst_topology_mgr.lock.
Back when I originally wrote this, I did this piggy-backing with the
intention that I'd eventually be teaching our MST helpers how to recover
when an MST device has stopped responding, which in turn would require
the MST helpers having a way of disabling MST independently of the
driver. Note that this was before I reworked locking in the MST helpers,
so at the time we were sticking random things under &mgr->lock - which
grabbing this lock was meant to protect against.
This never came to fruition because doing such a reset safely turned out
to be a lot more painful and impossible than it sounds, and also just
risks us working around issues with our MST handlers that should be
properly fixed instead. Even if it did though, simply calling
drm_dp_mst_topology_mgr_set_mst() from the MST helpers (with the
exception of when we're tearing down our MST managers, that's always OK)
wouldn't have been a bad idea, since drivers like nouveau and i915 need
to do their own book keeping immediately after disabling MST.
So, implementing that would likely require adding a hook for
helper-triggered MST disables anyway.
So, fast forward to now - we want to start adding support for all of the
miscellaneous bits of the DP protocol (for both SST and MST) we're
missing before moving on to supporting more complicated features like
supporting different BPP values on MST, DSC, etc. Since many of these
features only exist on SST and make use of DP HPD IRQs, we want to be
able to atomically check whether we're servicing an MST IRQ or SST IRQ
in nouveau_connector_hotplug(). Currently we literally don't do this at
all, and just handle any kind of possible DP IRQ we could get including
ESIs - even if MST isn't actually enabled.
This would be very complicated and difficult to fix if we need to hold
&mgr->lock while handling SST IRQs to ensure that the MST topology
state doesn't change under us. What we really want here is to do our own
tracking of whether MST is enabled or not, similar to drivers like i915,
and define our own locking order to decomplicate things and avoid
hitting locking issues in the future.
So, let's do this by refactoring our MST probing/enabling code to use
our own MST bookkeeping, along with adding a lock for protecting DP
state that needs to be checked outside of our connector probing
functions. While we're at it, we also remove a bunch of unneeded steps
we perform when probing/enabling MST:
* Enabling bits in MSTM_CTRL before calling drm_dp_mst_topology_mgr_set_mst().
I don't think these ever actually did anything, since the nvif methods
for enabling MST don't actually do anything DPCD related and merely
indicate to nvkm that we've turned on MST.
* Checking the MSTM_CTRL bit is intact when checking the state of an
enabled MST topology in nv50_mstm_detect(). I just added this to be safe
originally, but now that we try reading the DPCD when probing DP
connectors it shouldn't be needed as that will abort our hotplug probing
if the device was removed well before we start checking for MST.
* All of the duplicate DPCD version checks.
This leaves us with much nicer looking code, a much more sensible
locking scheme, and an easy way of checking whether MST is enabled or
not for handling DP HPD IRQs.
v2:
* Get rid of accidental newlines
v4:
* Fix uninitialized usage of mstm in nv50_mstm_detect() - thanks kernel
bot!
Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200826182456.322681-9-lyude@redhat.com
2020-08-26 14:24:44 -04:00
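To illustrate the bookkeeping and locking scheme described above, here is a minimal sketch. The structure, lock, and helper names are hypothetical; the only point is that the driver records MST state itself, under its own lock, so the HPD IRQ handler can choose the MST or SST path atomically without taking drm_dp_mst_topology_mgr.lock:

struct example_dp_outp {
	struct mutex hpd_irq_lock;	/* driver-defined, ordered before any MST locks */
	bool mst_enabled;		/* driver's own record of whether MST is active */
};

static void example_handle_esi(struct example_dp_outp *outp);     /* hypothetical MST/ESI path */
static void example_handle_sst_irq(struct example_dp_outp *outp); /* hypothetical SST IRQ path */

static void example_dp_hpd_irq(struct example_dp_outp *outp)
{
	mutex_lock(&outp->hpd_irq_lock);
	if (outp->mst_enabled)
		example_handle_esi(outp);
	else
		example_handle_sst_irq(outp);
	mutex_unlock(&outp->hpd_irq_lock);
}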
|
|
|
nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
|
2011-07-04 16:25:18 +10:00
|
|
|
{
|
2020-08-26 14:24:44 -04:00
|
|
|
struct nouveau_drm *drm = nouveau_drm(dev);
|
2016-11-04 17:20:36 +10:00
|
|
|
struct drm_encoder *encoder;
|
2016-11-04 17:20:36 +10:00
|
|
|
struct drm_plane *plane;
|
|
|
|
|
|
|
|
drm_for_each_plane(plane, dev) {
|
|
|
|
struct nv50_wndw *wndw = nv50_wndw(plane);
|
|
|
|
if (plane->funcs != &nv50_wndw)
|
|
|
|
continue;
|
|
|
|
nv50_wndw_fini(wndw);
|
|
|
|
}
|
2016-11-04 17:20:36 +10:00
|
|
|
|
|
|
|
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
2020-08-26 14:24:44 -04:00
|
|
|
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST)
|
|
|
|
nv50_mstm_fini(nouveau_encoder(encoder));
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
2020-08-26 14:24:44 -04:00
|
|
|
|
|
|
|
if (!runtime)
|
|
|
|
cancel_work_sync(&drm->hpd_work);
|
2011-07-04 16:25:18 +10:00
|
|
|
}
|
|
|
|
|
2019-02-12 22:28:13 +10:00
|
|
|
static int
|
2019-02-12 22:28:13 +10:00
|
|
|
nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
|
2011-07-04 16:25:18 +10:00
|
|
|
{
|
2018-05-08 20:39:47 +10:00
|
|
|
struct nv50_core *core = nv50_disp(dev)->core;
|
2016-11-04 17:20:36 +10:00
|
|
|
struct drm_encoder *encoder;
|
2016-11-04 17:20:36 +10:00
|
|
|
struct drm_plane *plane;
|
2013-03-02 13:21:31 +10:00
|
|
|
|
2020-05-11 18:41:23 -04:00
|
|
|
if (resume || runtime)
|
|
|
|
core->func->init(core);
|
2016-11-04 17:20:36 +10:00
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
|
|
|
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
|
2017-05-19 23:59:35 +10:00
|
|
|
struct nouveau_encoder *nv_encoder =
|
|
|
|
nouveau_encoder(encoder);
|
2020-08-26 14:24:44 -04:00
|
|
|
nv50_mstm_init(nv_encoder, runtime);
|
2016-11-04 17:20:36 +10:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
drm_for_each_plane(plane, dev) {
|
|
|
|
struct nv50_wndw *wndw = nv50_wndw(plane);
|
|
|
|
if (plane->funcs != &nv50_wndw)
|
|
|
|
continue;
|
|
|
|
nv50_wndw_init(wndw);
|
|
|
|
}
|
|
|
|
|
2013-03-02 13:21:31 +10:00
|
|
|
return 0;
|
2011-07-04 16:25:18 +10:00
|
|
|
}
|
|
|
|
|
2019-02-12 22:28:13 +10:00
|
|
|
static void
|
2012-11-21 14:40:21 +10:00
|
|
|
nv50_display_destroy(struct drm_device *dev)
|
2011-07-04 16:25:18 +10:00
|
|
|
{
|
2012-11-21 14:40:21 +10:00
|
|
|
struct nv50_disp *disp = nv50_disp(dev);
|
2011-11-12 01:30:24 +10:00
|
|
|
|
2020-01-13 15:17:21 +01:00
|
|
|
nv50_audio_component_fini(nouveau_drm(dev));
|
|
|
|
|
2020-05-11 18:41:24 -04:00
|
|
|
nvif_object_unmap(&disp->caps);
|
2020-03-30 09:51:33 +10:00
|
|
|
nvif_object_dtor(&disp->caps);
|
2018-05-08 20:39:47 +10:00
|
|
|
nv50_core_del(&disp->core);
|
2011-07-04 16:25:18 +10:00
|
|
|
|
2011-11-16 15:48:48 +10:00
|
|
|
nouveau_bo_unmap(disp->sync);
|
2012-11-25 23:04:23 +01:00
|
|
|
if (disp->sync)
|
|
|
|
nouveau_bo_unpin(disp->sync);
|
2011-11-16 15:48:48 +10:00
|
|
|
nouveau_bo_ref(NULL, &disp->sync);
|
2011-07-05 10:33:08 +10:00
|
|
|
|
2012-07-31 16:16:21 +10:00
|
|
|
nouveau_display(dev)->priv = NULL;
|
2011-07-04 16:25:18 +10:00
|
|
|
kfree(disp);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2012-11-21 14:40:21 +10:00
|
|
|
nv50_display_create(struct drm_device *dev)
|
2011-07-04 16:25:18 +10:00
|
|
|
{
|
2016-05-18 13:57:42 +10:00
|
|
|
struct nvif_device *device = &nouveau_drm(dev)->client.device;
|
2012-07-31 16:16:21 +10:00
|
|
|
struct nouveau_drm *drm = nouveau_drm(dev);
|
|
|
|
struct dcb_table *dcb = &drm->vbios.dcb;
|
2011-07-05 13:08:40 +10:00
|
|
|
struct drm_connector *connector, *tmp;
|
2012-11-21 14:40:21 +10:00
|
|
|
struct nv50_disp *disp;
|
2012-07-11 10:44:20 +10:00
|
|
|
struct dcb_output *dcbe;
|
2012-03-04 16:25:59 +10:00
|
|
|
int crtcs, ret, i;
|
drm/nouveau/kms/nv50-: Use less encoders by making mstos per-head
Currently, for every single MST capable DRM connector we create a set of
fake encoders, one for each possible head. Unfortunately this ends up
being a huge waste of encoders. While this currently isn't causing us
any problems, it's extremely close to doing so.
The ThinkPad P71 is a good example of this. Originally when trying to
figure out why nouveau was failing to load on this laptop, I discovered
it was because nouveau was creating too many encoders. This ended up
being because we were mistakenly creating MST encoders for the eDP port,
however we are still extremely close to hitting the encoder limit on
this machine as it exposes 1 eDP port and 5 DP ports, resulting in 31
encoders.
So while this fix didn't end up being necessary to fix the P71, we still
need to implement this so that we avoid hitting the encoder limit for
valid display configurations in the event that some machine with more
connectors than this becomes available. Plus, we don't want to let good
code go to waste :)
So, use less encoders by only creating one MSTO per head. Then, attach
each new MSTC to each MSTO which corresponds to a head that its parent
DP port is capable of using. This brings the number of encoders we
register on the ThinkPad P71 from 31, down to just 15. Yay!
Signed-off-by: Lyude Paul <lyude@redhat.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
2019-09-13 18:03:52 -04:00
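The shape of the change, in a very rough sketch (the array, helper, and variable names below are illustrative, not the driver's actual code): one MSTO is created per head up front, and each new MSTC is attached only to the MSTOs of the heads its parent SOR can drive.

/* at display creation time: one MST encoder (MSTO) per hardware head */
for (i = 0; i < num_heads; i++)
	msto[i] = example_msto_create(dev, i);	/* hypothetical constructor */

/* later, when an MST connector (MSTC) appears under a DP port, attach it to
 * the MSTOs of every head the parent SOR is capable of driving */
for (i = 0; i < num_heads; i++) {
	if (parent_sor_heads & BIT(i))
		drm_connector_attach_encoder(&mstc->connector,
					     &msto[i]->encoder);
}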
|
|
|
bool has_mst = nv50_has_mst(drm);
|
2011-07-04 16:25:18 +10:00
|
|
|
|
|
|
|
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
|
|
|
|
if (!disp)
|
|
|
|
return -ENOMEM;
|
2012-07-31 16:16:21 +10:00
|
|
|
|
2016-11-04 17:20:36 +10:00
|
|
|
mutex_init(&disp->mutex);
|
|
|
|
|
2012-07-31 16:16:21 +10:00
|
|
|
nouveau_display(dev)->priv = disp;
|
2012-11-21 14:40:21 +10:00
|
|
|
nouveau_display(dev)->dtor = nv50_display_destroy;
|
|
|
|
nouveau_display(dev)->init = nv50_display_init;
|
|
|
|
nouveau_display(dev)->fini = nv50_display_fini;
|
2014-08-10 04:10:22 +10:00
|
|
|
disp->disp = &nouveau_display(dev)->disp;
|
2016-11-04 17:20:36 +10:00
|
|
|
dev->mode_config.funcs = &nv50_disp_func;
|
2018-09-05 08:04:40 +02:00
|
|
|
dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true;
|
2019-06-11 16:40:31 +10:00
|
|
|
dev->mode_config.normalize_zpos = true;
|
2011-07-04 16:25:18 +10:00
|
|
|
|
2012-10-16 14:18:32 +10:00
|
|
|
/* small shared memory area we use for notifiers and semaphores */
|
2020-09-08 14:39:36 +02:00
|
|
|
ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
|
|
|
|
NOUVEAU_GEM_DOMAIN_VRAM,
|
2014-01-09 11:03:15 +01:00
|
|
|
0, 0x0000, NULL, NULL, &disp->sync);
|
2012-10-16 14:18:32 +10:00
|
|
|
if (!ret) {
|
2020-09-08 14:39:36 +02:00
|
|
|
ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true);
|
2012-11-25 23:04:23 +01:00
|
|
|
if (!ret) {
|
2012-10-16 14:18:32 +10:00
|
|
|
ret = nouveau_bo_map(disp->sync);
|
2012-11-25 23:04:23 +01:00
|
|
|
if (ret)
|
|
|
|
nouveau_bo_unpin(disp->sync);
|
|
|
|
}
|
2012-10-16 14:18:32 +10:00
|
|
|
if (ret)
|
|
|
|
nouveau_bo_ref(NULL, &disp->sync);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* allocate master evo channel */
|
2018-05-08 20:39:47 +10:00
|
|
|
ret = nv50_core_new(drm, &disp->core);
|
2012-10-16 14:18:32 +10:00
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
|
2020-05-11 18:41:23 -04:00
|
|
|
disp->core->func->init(disp->core);
|
2020-05-11 18:41:24 -04:00
|
|
|
if (disp->core->func->caps_init) {
|
|
|
|
ret = disp->core->func->caps_init(drm, disp);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
}
|
2020-05-11 18:41:23 -04:00
|
|
|
|
2020-02-10 15:15:53 -08:00
|
|
|
/* Assign the correct format modifiers */
|
|
|
|
if (disp->disp->object.oclass >= TU102_DISP)
|
|
|
|
nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
|
|
|
|
else
|
2020-07-24 13:26:40 +10:00
|
|
|
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
|
2020-02-10 15:15:53 -08:00
|
|
|
nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
|
|
|
|
else
|
|
|
|
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
|
|
|
|
|
2011-07-05 16:48:06 +10:00
|
|
|
/* create crtc objects to represent the hw heads */
|
2018-05-08 20:39:48 +10:00
|
|
|
if (disp->disp->object.oclass >= GV100_DISP)
|
|
|
|
crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
|
|
|
|
else
|
2018-05-08 20:39:47 +10:00
|
|
|
if (disp->disp->object.oclass >= GF110_DISP)
|
2017-07-03 13:06:26 -04:00
|
|
|
crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
|
2012-11-16 11:44:14 +10:00
|
|
|
else
|
2017-07-03 13:06:26 -04:00
|
|
|
crtcs = 0x3;
|
2012-11-16 11:44:14 +10:00
|
|
|
|
2017-07-03 13:06:26 -04:00
|
|
|
for (i = 0; i < fls(crtcs); i++) {
|
2019-09-13 18:03:52 -04:00
|
|
|
struct nv50_head *head;
|
|
|
|
|
2017-07-03 13:06:26 -04:00
|
|
|
if (!(crtcs & (1 << i)))
|
|
|
|
continue;
|
2019-09-13 18:03:52 -04:00
|
|
|
|
|
|
|
head = nv50_head_create(dev, i);
|
|
|
|
if (IS_ERR(head)) {
|
|
|
|
ret = PTR_ERR(head);
|
2011-07-05 16:48:06 +10:00
|
|
|
goto out;
|
2019-09-13 18:03:52 -04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (has_mst) {
|
|
|
|
head->msto = nv50_msto_new(dev, head, i);
|
|
|
|
if (IS_ERR(head->msto)) {
|
|
|
|
ret = PTR_ERR(head->msto);
|
|
|
|
head->msto = NULL;
|
|
|
|
goto out;
|
|
|
|
}
|
2019-09-13 18:03:53 -04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* FIXME: This is a hack to workaround the following
|
|
|
|
* issues:
|
|
|
|
*
|
|
|
|
* https://gitlab.gnome.org/GNOME/mutter/issues/759
|
|
|
|
* https://gitlab.freedesktop.org/xorg/xserver/merge_requests/277
|
|
|
|
*
|
|
|
|
* Once these issues are closed, this should be
|
|
|
|
* removed
|
|
|
|
*/
|
|
|
|
head->msto->encoder.possible_crtcs = crtcs;
|
2019-09-13 18:03:52 -04:00
|
|
|
}
|
2011-07-05 16:48:06 +10:00
|
|
|
}
|
|
|
|
|
2011-07-05 13:08:40 +10:00
|
|
|
/* create encoder/connector objects based on VBIOS DCB table */
|
|
|
|
for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
|
2018-07-12 13:13:52 -04:00
|
|
|
connector = nouveau_connector_create(dev, dcbe);
|
2011-07-05 13:08:40 +10:00
|
|
|
if (IS_ERR(connector))
|
|
|
|
continue;
|
|
|
|
|
2013-02-11 09:52:58 +10:00
|
|
|
if (dcbe->location == DCB_LOC_ON_CHIP) {
|
|
|
|
switch (dcbe->type) {
|
|
|
|
case DCB_OUTPUT_TMDS:
|
|
|
|
case DCB_OUTPUT_LVDS:
|
|
|
|
case DCB_OUTPUT_DP:
|
|
|
|
ret = nv50_sor_create(connector, dcbe);
|
|
|
|
break;
|
|
|
|
case DCB_OUTPUT_ANALOG:
|
|
|
|
ret = nv50_dac_create(connector, dcbe);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
ret = -ENODEV;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
ret = nv50_pior_create(connector, dcbe);
|
2011-07-05 13:08:40 +10:00
|
|
|
}
|
|
|
|
|
2013-02-11 09:52:58 +10:00
|
|
|
if (ret) {
|
|
|
|
NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
|
|
|
|
dcbe->location, dcbe->type,
|
|
|
|
ffs(dcbe->or) - 1, ret);
|
2013-03-05 22:26:06 +10:00
|
|
|
ret = 0;
|
2011-07-05 13:08:40 +10:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* cull any connectors we created that don't have an encoder */
|
|
|
|
list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
|
2019-09-13 16:28:57 -07:00
|
|
|
if (connector->possible_encoders)
|
2011-07-05 13:08:40 +10:00
|
|
|
continue;
|
|
|
|
|
2012-07-31 16:16:21 +10:00
|
|
|
NV_WARN(drm, "%s has no encoders, removing\n",
|
2014-06-03 14:56:18 +03:00
|
|
|
connector->name);
|
2011-07-05 13:08:40 +10:00
|
|
|
connector->funcs->destroy(connector);
|
|
|
|
}
|
|
|
|
|
drm/nouveau/kms/nv50-: Allow vblank_disable_immediate
With instantaneous high precision vblank timestamping
that updates at leading edge of vblank, the emulated
"hw vblank counter" from vblank timestamping, which
increments at leading edge of vblank, and reliable
page flip execution and completion at leading edge of
vblank, we should meet the requirements for fast/
immediate vblank irq disable/enable.
This is only allowed on nv50+ GPUs, i.e. the ones with
atomic modesetting. One requirement for immediate vblank
disable is that high precision vblank timestamping works
reliably all the time on all connectors. This is not the
case on all pre-nv50 parts for analog VGA outputs, where we
currently don't always have support for scanout position
queries and therefore fall back to vblank interrupt
timestamping. The implementation in nv04_head_state() does
not return valid values for vblanks, vtotal, hblanks, htotal
for VGA outputs on all cards, but those are needed for scanout
position queries.
Testing on Linux-4.12-rc5 + drm-next on a GeForce 9500 GT
(NV G96) with timing measurement equipment indicates this
works fine, so allow immediate vblank disable for power
saving.
For debugging in case of unexpected trouble, booting
with kernel cmdline option drm.vblankoffdelay=0
(or echo 0 > /sys/module/drm/parameters/vblankoffdelay)
would keep vblank irqs permanently on to approximate old
behavior.
Signed-off-by: Mario Kleiner <mario.kleiner.de@gmail.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
2018-07-16 16:47:50 +10:00
|
|
|
/* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
|
|
|
|
dev->vblank_disable_immediate = true;
|
|
|
|
|
2020-01-13 15:17:21 +01:00
|
|
|
nv50_audio_component_init(drm);
|
|
|
|
|
2011-07-04 16:25:18 +10:00
|
|
|
out:
|
|
|
|
if (ret)
|
2012-11-21 14:40:21 +10:00
|
|
|
nv50_display_destroy(dev);
|
2011-07-04 16:25:18 +10:00
|
|
|
return ret;
|
|
|
|
}
|
2020-02-10 15:15:53 -08:00
|
|
|
|
|
|
|
/******************************************************************************
|
|
|
|
* Format modifiers
|
|
|
|
*****************************************************************************/
|
|
|
|
|
|
|
|
/****************************************************************
|
|
|
|
* Log2(block height) ----------------------------+ *
|
|
|
|
* Page Kind ----------------------------------+ | *
|
|
|
|
* Gob Height/Page Kind Generation ------+ | | *
|
|
|
|
* Sector layout -------+ | | | *
|
|
|
|
* Compression ------+ | | | | */
|
|
|
|
const u64 disp50xx_modifiers[] = { /* | | | | | */
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 0),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 1),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 2),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 3),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 4),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 5),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 0),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 1),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 2),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 3),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 4),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 5),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 0),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 1),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 2),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 3),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 4),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 5),
|
|
|
|
DRM_FORMAT_MOD_LINEAR,
|
|
|
|
DRM_FORMAT_MOD_INVALID
|
|
|
|
};
|
|
|
|
|
|
|
|
/****************************************************************
|
|
|
|
* Log2(block height) ----------------------------+ *
|
|
|
|
* Page Kind ----------------------------------+ | *
|
|
|
|
* Gob Height/Page Kind Generation ------+ | | *
|
|
|
|
* Sector layout -------+ | | | *
|
|
|
|
* Compression ------+ | | | | */
|
|
|
|
const u64 disp90xx_modifiers[] = { /* | | | | | */
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 0),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 1),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 2),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 3),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 4),
|
|
|
|
DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 5),
|
|
|
|
DRM_FORMAT_MOD_LINEAR,
|
|
|
|
DRM_FORMAT_MOD_INVALID
|
|
|
|
};
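As a small usage note (not part of the driver): both arrays above are terminated by DRM_FORMAT_MOD_INVALID, so checking whether a given modifier is advertised amounts to a simple linear scan, e.g.:

static bool
example_modifier_in_list(const u64 *modifiers, u64 modifier)
{
	/* walk the DRM_FORMAT_MOD_INVALID-terminated list */
	for (; *modifiers != DRM_FORMAT_MOD_INVALID; modifiers++) {
		if (*modifiers == modifier)
			return true;
	}
	return false;
}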
|