linux/drivers/gpu/drm/nouveau/dispnv50/disp.c

/*
* Copyright 2011 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "disp.h"
#include "atom.h"
#include "core.h"
#include "head.h"
#include "wndw.h"
#include <linux/dma-mapping.h>
#include <linux/hdmi.h>
#include <linux/component.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_scdc_helper.h>
#include <drm/drm_vblank.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/cl5070.h>
#include <nvif/cl507d.h>
#include <nvif/event.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_fence.h"
#include "nouveau_fbcon.h"
#include <subdev/bios/dp.h>
/******************************************************************************
* Atomic state
*****************************************************************************/
struct nv50_outp_atom {
struct list_head head;
struct drm_encoder *encoder;
bool flush_disable;
union nv50_outp_atom_mask {
struct {
bool ctrl:1;
};
u8 mask;
} set, clr;
};
/******************************************************************************
* EVO channel
*****************************************************************************/
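/* Walk the caller-supplied class list in priority order, instantiate the
 * first class also advertised by the display object, and map the channel's
 * user registers on success. Returns -ENOSYS if no class matches.
 */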
static int
nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
const s32 *oclass, u8 head, void *data, u32 size,
struct nv50_chan *chan)
{
struct nvif_sclass *sclass;
int ret, i, n;
chan->device = device;
ret = n = nvif_object_sclass_get(disp, &sclass);
if (ret < 0)
return ret;
while (oclass[0]) {
for (i = 0; i < n; i++) {
if (sclass[i].oclass == oclass[0]) {
ret = nvif_object_init(disp, 0, oclass[0],
data, size, &chan->user);
if (ret == 0)
nvif_object_map(&chan->user, NULL, 0);
nvif_object_sclass_put(&sclass);
return ret;
}
}
oclass++;
}
nvif_object_sclass_put(&sclass);
return -ENOSYS;
}
static void
nv50_chan_destroy(struct nv50_chan *chan)
{
nvif_object_fini(&chan->user);
}
/******************************************************************************
* DMA EVO channel
*****************************************************************************/
void
nv50_dmac_destroy(struct nv50_dmac *dmac)
{
nvif_object_fini(&dmac->vram);
nvif_object_fini(&dmac->sync);
nv50_chan_destroy(&dmac->base);
nvif_mem_fini(&dmac->push);
}
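/* Allocate a 4KiB push buffer (forced into VRAM on Pascal, see the comment
 * below), create the channel from the supplied class list and, when a sync
 * buffer offset is given, create DMA objects covering the 4KiB sync area and
 * all of VRAM.
 */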
int
nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
struct nv50_dmac *dmac)
{
struct nouveau_cli *cli = (void *)device->object.client;
struct nv50_disp_core_channel_dma_v0 *args = data;
u8 type = NVIF_MEM_COHERENT;
int ret;
mutex_init(&dmac->lock);
/* Pascal added support for 47-bit physical addresses, but some
* parts of EVO still only accept 40-bit PAs.
*
* To avoid issues on systems with large amounts of RAM, and on
* systems where an IOMMU maps pages at a high address, we need
* to allocate push buffers in VRAM instead.
*
* This appears to match NVIDIA's behaviour on Pascal.
*/
if (device->info.family == NV_DEVICE_INFO_V0_PASCAL)
type |= NVIF_MEM_VRAM;
ret = nvif_mem_init_map(&cli->mmu, type, 0x1000, &dmac->push);
if (ret)
return ret;
dmac->ptr = dmac->push.object.map.ptr;
args->pushbuf = nvif_handle(&dmac->push.object);
ret = nv50_chan_create(device, disp, oclass, head, data, size,
&dmac->base);
if (ret)
return ret;
if (!syncbuf)
return 0;
ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = syncbuf + 0x0000,
.limit = syncbuf + 0x0fff,
}, sizeof(struct nv_dma_v0),
&dmac->sync);
if (ret)
return ret;
ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
&(struct nv_dma_v0) {
.target = NV_DMA_V0_TARGET_VRAM,
.access = NV_DMA_V0_ACCESS_RDWR,
.start = 0,
.limit = device->info.ram_user - 1,
}, sizeof(struct nv_dma_v0),
&dmac->vram);
if (ret)
return ret;
return ret;
}
/******************************************************************************
* EVO channel helpers
*****************************************************************************/
static void
evo_flush(struct nv50_dmac *dmac)
{
/* Push buffer fetches are not coherent with BAR1, so we need to ensure
* writes have been flushed right through to VRAM before writing PUT.
*/
if (dmac->push.type & NVIF_MEM_VRAM) {
struct nvif_device *device = dmac->base.device;
nvif_wr32(&device->object, 0x070000, 0x00000001);
nvif_msec(device, 2000,
if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
break;
);
}
}
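/* Reserve room for 'nr' dwords in the push buffer; the channel lock is taken
 * here and released by evo_kick(). If the request would run past the end of
 * the page, a wrap marker (0x20000000, a jump back to offset zero) is
 * written, PUT is reset and we wait for the register at offset 0x0004 to
 * return to zero. Returns NULL, with the lock dropped, if the channel stalls.
 */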
u32 *
evo_wait(struct nv50_dmac *evoc, int nr)
{
struct nv50_dmac *dmac = evoc;
struct nvif_device *device = dmac->base.device;
u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
mutex_lock(&dmac->lock);
if (put + nr >= (PAGE_SIZE / 4) - 8) {
dmac->ptr[put] = 0x20000000;
evo_flush(dmac);
nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
if (nvif_msec(device, 2000,
if (!nvif_rd32(&dmac->base.user, 0x0004))
break;
) < 0) {
mutex_unlock(&dmac->lock);
pr_err("nouveau: evo channel stalled\n");
return NULL;
}
put = 0;
}
return dmac->ptr + put;
}
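/* Submit the methods written since evo_wait(): flush writes out to VRAM if
 * the push buffer lives there, advance PUT to the new offset and drop the
 * channel lock taken by evo_wait().
 */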
void
evo_kick(u32 *push, struct nv50_dmac *evoc)
{
struct nv50_dmac *dmac = evoc;
evo_flush(dmac);
nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
mutex_unlock(&dmac->lock);
}
/******************************************************************************
* Output path helpers
*****************************************************************************/
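/* Acquire/release an output resource (OR) for an encoder via the
 * NV50_DISP_MTHD_V1_{ACQUIRE,RELEASE} methods; acquire records the assigned
 * OR and link in the encoder, release clears them.
 */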
static void
nv50_outp_release(struct nouveau_encoder *nv_encoder)
{
struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
struct {
struct nv50_disp_mthd_v1 base;
} args = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_RELEASE,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = nv_encoder->dcb->hashm,
};
nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
nv_encoder->or = -1;
nv_encoder->link = 0;
}
static int
nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
{
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nv50_disp *disp = nv50_disp(drm->dev);
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_acquire_v0 info;
} args = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_ACQUIRE,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = nv_encoder->dcb->hashm,
};
int ret;
ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
if (ret) {
NV_ERROR(drm, "error acquiring output path: %d\n", ret);
return ret;
}
nv_encoder->or = args.info.or;
nv_encoder->link = args.info.link;
return 0;
}
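/* Common encoder atomic_check step: decide whether the panel's native mode
 * (plus the GPU scaler) should replace the requested mode, honouring
 * DRM_MODE_SCALE_NONE for LVDS/eDP when an EDID mode matches the native
 * resolution, and flag mode_changed if adjusted_mode was overridden.
 */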
static int
nv50_outp_atomic_check_view(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state,
struct drm_display_mode *native_mode)
{
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
struct drm_display_mode *mode = &crtc_state->mode;
struct drm_connector *connector = conn_state->connector;
struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
asyc->scaler.full = false;
if (!native_mode)
return 0;
if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
/* Don't force the scaler for EDID modes with the
* same size as the native one (e.g. a different
* refresh rate).
*/
if (mode->hdisplay == native_mode->hdisplay &&
mode->vdisplay == native_mode->vdisplay &&
mode->type & DRM_MODE_TYPE_DRIVER)
break;
mode = native_mode;
asyc->scaler.full = true;
break;
default:
break;
}
} else {
mode = native_mode;
}
if (!drm_mode_equal(adjusted_mode, mode)) {
drm_mode_copy(adjusted_mode, mode);
crtc_state->mode_changed = true;
}
return 0;
}
static int
nv50_outp_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_connector *connector = conn_state->connector;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
int ret;
ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
nv_connector->native_mode);
if (ret)
return ret;
if (crtc_state->mode_changed || crtc_state->connectors_changed)
asyh->or.bpc = connector->display_info.bpc;
return 0;
}
/******************************************************************************
* DAC
*****************************************************************************/
static void
nv50_dac_disable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
if (nv_encoder->crtc)
core->func->dac->ctrl(core, nv_encoder->or, 0x00000000, NULL);
nv_encoder->crtc = NULL;
nv50_outp_release(nv_encoder);
}
static void
nv50_dac_enable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
nv50_outp_acquire(nv_encoder);
core->func->dac->ctrl(core, nv_encoder->or, 1 << nv_crtc->index, asyh);
asyh->or.depth = 0;
nv_encoder->crtc = encoder->crtc;
}
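/* DAC load detection: program the test voltage from the VBIOS (falling back
 * to 340) and report connected only if the method indicates a load.
 */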
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_dac_load_v0 load;
} args = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_DAC_LOAD,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = nv_encoder->dcb->hashm,
};
int ret;
args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval;
if (args.load.data == 0)
args.load.data = 340;
ret = nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
if (ret || !args.load.load)
return connector_status_disconnected;
return connector_status_connected;
}
static const struct drm_encoder_helper_funcs
nv50_dac_help = {
.atomic_check = nv50_outp_atomic_check,
.enable = nv50_dac_enable,
.disable = nv50_dac_disable,
.detect = nv50_dac_detect
};
static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
kfree(encoder);
}
static const struct drm_encoder_funcs
nv50_dac_func = {
.destroy = nv50_dac_destroy,
};
static int
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus;
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type = DRM_MODE_ENCODER_DAC;
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
nv_encoder->dcb = dcbe;
bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
if (bus)
nv_encoder->i2c = &bus->i2c;
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
"dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_dac_help);
drm_connector_attach_encoder(connector, encoder);
return 0;
}
/*
* audio component binding for ELD notification
*/
static void
nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port)
{
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
port, -1);
}
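/* Look up the ELD for the CRTC ("port" from the audio driver's point of
 * view) by walking the encoders, and report whether a monitor with audio
 * support is attached. Returns the ELD size (0 if no audio), copying at most
 * max_bytes of it into 'buf'.
 */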
static int
nv50_audio_component_get_eld(struct device *kdev, int port, int pipe,
bool *enabled, unsigned char *buf, int max_bytes)
{
struct drm_device *drm_dev = dev_get_drvdata(kdev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_encoder *encoder;
struct nouveau_encoder *nv_encoder;
struct nouveau_connector *nv_connector;
struct nouveau_crtc *nv_crtc;
int ret = 0;
*enabled = false;
drm_for_each_encoder(encoder, drm->dev) {
nv_encoder = nouveau_encoder(encoder);
nv_connector = nouveau_encoder_connector_get(nv_encoder);
nv_crtc = nouveau_crtc(encoder->crtc);
if (!nv_connector || !nv_crtc || nv_crtc->index != port)
continue;
*enabled = drm_detect_monitor_audio(nv_connector->edid);
if (*enabled) {
ret = drm_eld_size(nv_connector->base.eld);
memcpy(buf, nv_connector->base.eld,
min(max_bytes, ret));
}
break;
}
return ret;
}
static const struct drm_audio_component_ops nv50_audio_component_ops = {
.get_eld = nv50_audio_component_get_eld,
};
static int
nv50_audio_component_bind(struct device *kdev, struct device *hda_kdev,
void *data)
{
struct drm_device *drm_dev = dev_get_drvdata(kdev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_audio_component *acomp = data;
if (WARN_ON(!device_link_add(hda_kdev, kdev, DL_FLAG_STATELESS)))
return -ENOMEM;
drm_modeset_lock_all(drm_dev);
acomp->ops = &nv50_audio_component_ops;
acomp->dev = kdev;
drm->audio.component = acomp;
drm_modeset_unlock_all(drm_dev);
return 0;
}
static void
nv50_audio_component_unbind(struct device *kdev, struct device *hda_kdev,
void *data)
{
struct drm_device *drm_dev = dev_get_drvdata(kdev);
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_audio_component *acomp = data;
drm_modeset_lock_all(drm_dev);
drm->audio.component = NULL;
acomp->ops = NULL;
acomp->dev = NULL;
drm_modeset_unlock_all(drm_dev);
}
static const struct component_ops nv50_audio_component_bind_ops = {
.bind = nv50_audio_component_bind,
.unbind = nv50_audio_component_unbind,
};
static void
nv50_audio_component_init(struct nouveau_drm *drm)
{
if (!component_add(drm->dev->dev, &nv50_audio_component_bind_ops))
drm->audio.component_registered = true;
}
static void
nv50_audio_component_fini(struct nouveau_drm *drm)
{
if (drm->audio.component_registered) {
component_del(drm->dev->dev, &nv50_audio_component_bind_ops);
drm->audio.component_registered = false;
}
}
/******************************************************************************
* Audio
*****************************************************************************/
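/* Pass the connector's ELD to the SOR's HDA function (or clear it on
 * disable) and notify the bound audio component of the change.
 */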
static void
nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_hda_eld_v0 eld;
} args = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
(0x0100 << nv_crtc->index),
};
nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
}
static void
nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector;
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct __packed {
struct {
struct nv50_disp_mthd_v1 mthd;
struct nv50_disp_sor_hda_eld_v0 eld;
} base;
u8 data[sizeof(nv_connector->base.eld)];
} args = {
.base.mthd.version = 1,
.base.mthd.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
.base.mthd.hasht = nv_encoder->dcb->hasht,
.base.mthd.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
(0x0100 << nv_crtc->index),
};
nv_connector = nouveau_encoder_connector_get(nv_encoder);
if (!drm_detect_monitor_audio(nv_connector->edid))
return;
memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
nvif_mthd(&disp->disp->object, 0, &args,
sizeof(args.base) + drm_eld_size(args.data));
nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
}
/******************************************************************************
* HDMI
*****************************************************************************/
static void
nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_hdmi_pwr_v0 pwr;
} args = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
(0x0100 << nv_crtc->index),
};
nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
}
static void
nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct nouveau_drm *drm = nouveau_drm(encoder->dev);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_hdmi_pwr_v0 pwr;
u8 infoframes[2 * 17]; /* two frames, up to 17 bytes each */
} args = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = (0xf0ff & nv_encoder->dcb->hashm) |
(0x0100 << nv_crtc->index),
.pwr.state = 1,
.pwr.rekey = 56, /* binary driver, and tegra, constant */
};
struct nouveau_connector *nv_connector;
struct drm_hdmi_info *hdmi;
u32 max_ac_packet;
union hdmi_infoframe avi_frame;
union hdmi_infoframe vendor_frame;
bool high_tmds_clock_ratio = false, scrambling = false;
u8 config;
int ret;
int size;
nv_connector = nouveau_encoder_connector_get(nv_encoder);
if (!drm_detect_hdmi_monitor(nv_connector->edid))
return;
hdmi = &nv_connector->base.display_info.hdmi;
ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi,
&nv_connector->base, mode);
if (!ret) {
/* We have an AVI InfoFrame, populate it to the display */
args.pwr.avi_infoframe_length
= hdmi_infoframe_pack(&avi_frame, args.infoframes, 17);
}
ret = drm_hdmi_vendor_infoframe_from_display_mode(&vendor_frame.vendor.hdmi,
&nv_connector->base, mode);
if (!ret) {
/* We have a Vendor InfoFrame, populate it to the display */
args.pwr.vendor_infoframe_length
= hdmi_infoframe_pack(&vendor_frame,
args.infoframes
+ args.pwr.avi_infoframe_length,
17);
}
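/* Max audio/auxiliary packet size: whatever is left of the horizontal
 * blanking period after the rekey period and an 18-pixel margin (constant
 * taken from tegra), expressed in 32-pixel units.
 */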
max_ac_packet = mode->htotal - mode->hdisplay;
max_ac_packet -= args.pwr.rekey;
max_ac_packet -= 18; /* constant from tegra */
args.pwr.max_ac_packet = max_ac_packet / 32;
if (hdmi->scdc.scrambling.supported) {
high_tmds_clock_ratio = mode->clock > 340000;
scrambling = high_tmds_clock_ratio ||
hdmi->scdc.scrambling.low_rates;
}
args.pwr.scdc =
NV50_DISP_SOR_HDMI_PWR_V0_SCDC_SCRAMBLE * scrambling |
NV50_DISP_SOR_HDMI_PWR_V0_SCDC_DIV_BY_4 * high_tmds_clock_ratio;
size = sizeof(args.base)
+ sizeof(args.pwr)
+ args.pwr.avi_infoframe_length
+ args.pwr.vendor_infoframe_length;
nvif_mthd(&disp->disp->object, 0, &args, size);
nv50_audio_enable(encoder, mode);
/* If SCDC is supported by the downstream monitor, update
* divider / scrambling settings to what we programmed above.
*/
if (!hdmi->scdc.scrambling.supported)
return;
ret = drm_scdc_readb(nv_encoder->i2c, SCDC_TMDS_CONFIG, &config);
if (ret < 0) {
NV_ERROR(drm, "Failure to read SCDC_TMDS_CONFIG: %d\n", ret);
return;
}
config &= ~(SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 | SCDC_SCRAMBLING_ENABLE);
config |= SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 * high_tmds_clock_ratio;
config |= SCDC_SCRAMBLING_ENABLE * scrambling;
ret = drm_scdc_writeb(nv_encoder->i2c, SCDC_TMDS_CONFIG, config);
if (ret < 0)
NV_ERROR(drm, "Failure to write SCDC_TMDS_CONFIG = 0x%02x: %d\n",
config, ret);
}
/******************************************************************************
* MST
*****************************************************************************/
#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
struct nv50_mstm {
struct nouveau_encoder *outp;
struct drm_dp_mst_topology_mgr mgr;
bool modified;
bool disabled;
int links;
};
struct nv50_mstc {
struct nv50_mstm *mstm;
struct drm_dp_mst_port *port;
struct drm_connector connector;
struct drm_display_mode *native;
struct edid *edid;
};
struct nv50_msto {
struct drm_encoder encoder;
struct nv50_head *head;
struct nv50_mstc *mstc;
bool disabled;
};
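/* Find the payload entry allocated for this MST output's VCPI in the
 * topology manager's payload table (logging the whole table for debugging),
 * or NULL if none is currently allocated. Caller must hold payload_lock.
 */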
static struct drm_dp_payload *
nv50_msto_payload(struct nv50_msto *msto)
{
struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
int vcpi = mstc->port->vcpi.vcpi, i;
WARN_ON(!mutex_is_locked(&mstm->mgr.payload_lock));
NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
for (i = 0; i < mstm->mgr.max_payloads; i++) {
struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
mstm->outp->base.base.name, i, payload->vcpi,
payload->start_slot, payload->num_slots);
}
for (i = 0; i < mstm->mgr.max_payloads; i++) {
struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
if (payload->vcpi == vcpi)
return payload;
}
return NULL;
}
static void
nv50_msto_cleanup(struct nv50_msto *msto)
{
struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
if (!msto->disabled)
return;
NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
msto->mstc = NULL;
msto->disabled = false;
}
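/* Program the hardware's VCPI allocation for this stream (start slot, slot
 * count, PBN) via NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI, using zeros when no
 * payload is currently allocated.
 */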
static void
nv50_msto_prepare(struct nv50_msto *msto)
{
struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
} args = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
.base.hasht = mstm->outp->dcb->hasht,
.base.hashm = (0xf0ff & mstm->outp->dcb->hashm) |
(0x0100 << msto->head->base.index),
};
mutex_lock(&mstm->mgr.payload_lock);
NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
if (mstc->port->vcpi.vcpi > 0) {
struct drm_dp_payload *payload = nv50_msto_payload(msto);
if (payload) {
args.vcpi.start_slot = payload->start_slot;
args.vcpi.num_slots = payload->num_slots;
args.vcpi.pbn = mstc->port->vcpi.pbn;
args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
}
}
NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
msto->encoder.name, msto->head->base.base.name,
args.vcpi.start_slot, args.vcpi.num_slots,
args.vcpi.pbn, args.vcpi.aligned_pbn);
nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args));
mutex_unlock(&mstm->mgr.payload_lock);
}
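/* Validate the MST stream against the topology: run the common view check,
 * then (for real modesets on non-duplicated states) recompute the PBN from
 * the adjusted clock and bpc and reserve VCPI slots in the atomic MST state.
 */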
static int
nv50_msto_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct drm_atomic_state *state = crtc_state->state;
struct drm_connector *connector = conn_state->connector;
struct nv50_mstc *mstc = nv50_mstc(connector);
struct nv50_mstm *mstm = mstc->mstm;
struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
int slots;
int ret;
ret = nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
mstc->native);
if (ret)
return ret;
if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
return 0;
/*
* When restoring duplicated states, we need to make sure that the bw
* remains the same and avoid recalculating it, as the connector's bpc
* may have changed after the state was duplicated
*/
if (!state->duplicated) {
const int clock = crtc_state->adjusted_mode.clock;
/*
* XXX: Since we don't use HDR in userspace quite yet, limit
* the bpc to 8 to save bandwidth on the topology. In the
* future, we'll want to properly fix this by dynamically
* selecting the highest possible bpc that would fit in the
* topology
*/
asyh->or.bpc = min(connector->display_info.bpc, 8U);
asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3, false);
}
slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
asyh->dp.pbn, 0);
if (slots < 0)
return slots;
asyh->dp.tu = slots;
return 0;
}
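/* Map a component bpc to the hardware OR depth field used by the core
 * channel (6bpc -> 0x2, 8bpc -> 0x5, 10bpc and above -> 0x6).
 */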
static u8
nv50_dp_bpc_to_depth(unsigned int bpc)
{
switch (bpc) {
case 6: return 0x2;
case 8: return 0x5;
case 10: /* fall-through */
default: return 0x6;
}
}
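/* Light up an MST stream: find the connector currently routed to this
 * encoder, allocate its VCPI from the precomputed PBN/TU values, acquire the
 * output path for the first active stream, and point the head at the SOR in
 * MST mode (protocol 0x8 or 0x9 depending on the link).
 */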
static void
nv50_msto_enable(struct drm_encoder *encoder)
{
struct nv50_head *head = nv50_head(encoder->crtc);
struct nv50_head_atom *armh = nv50_head_atom(head->base.base.state);
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = NULL;
struct nv50_mstm *mstm = NULL;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
u8 proto;
bool r;
drm_connector_list_iter_begin(encoder->dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->state->best_encoder == &msto->encoder) {
mstc = nv50_mstc(connector);
mstm = mstc->mstm;
break;
}
}
drm_connector_list_iter_end(&conn_iter);
if (WARN_ON(!mstc))
return;
r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, armh->dp.pbn,
armh->dp.tu);
if (!r)
DRM_DEBUG_KMS("Failed to allocate VCPI\n");
if (!mstm->links++)
nv50_outp_acquire(mstm->outp);
if (mstm->outp->link & 1)
proto = 0x8;
else
proto = 0x9;
mstm->outp->update(mstm->outp, head->base.index, armh, proto,
nv50_dp_bpc_to_depth(armh->or.bpc));
msto->mstc = mstc;
mstm->modified = true;
}
static void
nv50_msto_disable(struct drm_encoder *encoder)
{
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);
mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
mstm->modified = true;
if (!--mstm->links)
mstm->disabled = true;
msto->disabled = true;
}
static const struct drm_encoder_helper_funcs
nv50_msto_help = {
.disable = nv50_msto_disable,
.enable = nv50_msto_enable,
.atomic_check = nv50_msto_atomic_check,
};
static void
nv50_msto_destroy(struct drm_encoder *encoder)
{
struct nv50_msto *msto = nv50_msto(encoder);
drm_encoder_cleanup(&msto->encoder);
kfree(msto);
}
static const struct drm_encoder_funcs
nv50_msto = {
.destroy = nv50_msto_destroy,
};
static struct nv50_msto *
nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id)
{
struct nv50_msto *msto;
int ret;
msto = kzalloc(sizeof(*msto), GFP_KERNEL);
if (!msto)
return ERR_PTR(-ENOMEM);
ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
DRM_MODE_ENCODER_DPMST, "mst-%d", id);
if (ret) {
kfree(msto);
return ERR_PTR(ret);
}
drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
msto->encoder.possible_crtcs = drm_crtc_mask(&head->base.base);
msto->head = head;
return msto;
}
static struct drm_encoder *
nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
struct drm_connector_state *connector_state)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
struct drm_crtc *crtc = connector_state->crtc;
if (!(mstc->mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
return NULL;
return &nv50_head(crtc)->msto->encoder;
}
static enum drm_mode_status
nv50_mstc_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
return MODE_OK;
}
static int
nv50_mstc_get_modes(struct drm_connector *connector)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
int ret = 0;
mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
drm_connector_update_edid_property(&mstc->connector, mstc->edid);
if (mstc->edid)
ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
if (!mstc->connector.display_info.bpc)
mstc->connector.display_info.bpc = 8;
if (mstc->native)
drm_mode_destroy(mstc->connector.dev, mstc->native);
mstc->native = nouveau_conn_native_mode(&mstc->connector);
return ret;
}
static int
nv50_mstc_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
struct drm_dp_mst_topology_mgr *mgr = &mstc->mstm->mgr;
struct drm_connector_state *new_conn_state =
drm_atomic_get_new_connector_state(state, connector);
struct drm_connector_state *old_conn_state =
drm_atomic_get_old_connector_state(state, connector);
struct drm_crtc_state *crtc_state;
struct drm_crtc *new_crtc = new_conn_state->crtc;
if (!old_conn_state->crtc)
return 0;
/* We only want to free VCPI if this state disables the CRTC on this
* connector
*/
if (new_crtc) {
crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
if (!crtc_state ||
!drm_atomic_crtc_needs_modeset(crtc_state) ||
crtc_state->enable)
return 0;
}
return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port);
}
static int
nv50_mstc_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx, bool force)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
int ret;
if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
ret = pm_runtime_get_sync(connector->dev->dev);
if (ret < 0 && ret != -EACCES)
return connector_status_disconnected;
ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
mstc->port);
pm_runtime_mark_last_busy(connector->dev->dev);
pm_runtime_put_autosuspend(connector->dev->dev);
return ret;
}
static const struct drm_connector_helper_funcs
nv50_mstc_help = {
.get_modes = nv50_mstc_get_modes,
.mode_valid = nv50_mstc_mode_valid,
.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
.atomic_check = nv50_mstc_atomic_check,
.detect_ctx = nv50_mstc_detect,
};
static void
nv50_mstc_destroy(struct drm_connector *connector)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
drm_connector_cleanup(&mstc->connector);
drm_dp_mst_put_port_malloc(mstc->port);
kfree(mstc);
}
static const struct drm_connector_funcs
nv50_mstc = {
.reset = nouveau_conn_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = nv50_mstc_destroy,
.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
.atomic_set_property = nouveau_conn_atomic_set_property,
.atomic_get_property = nouveau_conn_atomic_get_property,
};
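/* Create the DRM connector backing an MST port: allocate an nv50_mstc,
 * initialise it as a DisplayPort connector, attach it to every MSTO
 * encoder whose head appears in the output's DCB head mask, set up the
 * path/tile properties, and take a malloc reference on the port.
 */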
static int
nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
const char *path, struct nv50_mstc **pmstc)
{
struct drm_device *dev = mstm->outp->base.base.dev;
struct drm_crtc *crtc;
struct nv50_mstc *mstc;
int ret;
if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
return -ENOMEM;
mstc->mstm = mstm;
mstc->port = port;
ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
DRM_MODE_CONNECTOR_DisplayPort);
if (ret) {
kfree(*pmstc);
*pmstc = NULL;
return ret;
}
drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);
mstc->connector.funcs->reset(&mstc->connector);
nouveau_conn_attach_properties(&mstc->connector);
drm_for_each_crtc(crtc, dev) {
if (!(mstm->outp->dcb->heads & drm_crtc_mask(crtc)))
continue;
drm_connector_attach_encoder(&mstc->connector,
&nv50_head(crtc)->msto->encoder);
}
drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
drm_connector_set_path_property(&mstc->connector, path);
drm_dp_mst_get_port_malloc(port);
return 0;
}
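/* Second half of the MST payload update sequence, run after the core
 * channel update completes: check ACT status, send the part-2 payload
 * updates, then finish cleanup on every modified MSTO belonging to this
 * manager.
 */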
static void
nv50_mstm_cleanup(struct nv50_mstm *mstm)
{
struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
struct drm_encoder *encoder;
int ret;
NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
ret = drm_dp_check_act_status(&mstm->mgr);
ret = drm_dp_update_payload_part2(&mstm->mgr);
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
if (mstc && mstc->mstm == mstm)
nv50_msto_cleanup(msto);
}
}
mstm->modified = false;
}
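/* First half of the MST payload update sequence, run before the core
 * channel update: send the part-1 payload table updates, prepare each
 * modified MSTO, and release the output if the manager was disabled and
 * no links remain.
 */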
static void
nv50_mstm_prepare(struct nv50_mstm *mstm)
{
struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
struct drm_encoder *encoder;
int ret;
NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
ret = drm_dp_update_payload_part1(&mstm->mgr);
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
struct nv50_msto *msto = nv50_msto(encoder);
struct nv50_mstc *mstc = msto->mstc;
if (mstc && mstc->mstm == mstm)
nv50_msto_prepare(msto);
}
}
if (mstm->disabled) {
if (!mstm->links)
nv50_outp_release(mstm->outp);
mstm->disabled = false;
}
}
static void
nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nv50_mstc *mstc = nv50_mstc(connector);
drm_connector_unregister(&mstc->connector);
drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
drm_connector_put(&mstc->connector);
}
static void
nv50_mstm_register_connector(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
drm_connector_register(connector);
}
static struct drm_connector *
nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, const char *path)
{
struct nv50_mstm *mstm = nv50_mstm(mgr);
struct nv50_mstc *mstc;
int ret;
ret = nv50_mstc_new(mstm, port, path, &mstc);
if (ret)
return NULL;
return &mstc->connector;
}
static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
.add_connector = nv50_mstm_add_connector,
.register_connector = nv50_mstm_register_connector,
.destroy_connector = nv50_mstm_destroy_connector,
};
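/* Service an MST interrupt: read the ESI bytes and feed them to
 * drm_dp_mst_hpd_irq() until nothing further is handled, acknowledging
 * the handled events after each pass.  A failed DPCD read is treated as
 * the hub having gone away, and the topology is torn down.
 */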
void
nv50_mstm_service(struct nv50_mstm *mstm)
{
struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
bool handled = true;
int ret;
u8 esi[8] = {};
if (!aux)
return;
while (handled) {
ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
if (ret != 8) {
drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
return;
}
drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
if (!handled)
break;
drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
}
}
void
nv50_mstm_remove(struct nv50_mstm *mstm)
{
if (mstm)
drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
}
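/* Program the MST link state.  On DPCD 1.2+ sinks DP_MSTM_CTRL is
 * cleared first to drop any stale sink-side topology state and, when
 * enabling, rewritten with DP_MST_EN; NVKM is then informed of the new
 * state via the SOR_DP_MST_LINK method.
 */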
static int
nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
{
struct nouveau_encoder *outp = mstm->outp;
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_dp_mst_link_v0 mst;
} args = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
.base.hasht = outp->dcb->hasht,
.base.hashm = outp->dcb->hashm,
.mst.state = state,
};
struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
struct nvif_object *disp = &drm->display->disp.object;
int ret;
if (dpcd >= 0x12) {
/* Even if we're enabling MST, start with disabling the
* branching unit to clear any sink-side MST topology state
* that wasn't set by us
*/
ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);
if (ret < 0)
return ret;
if (state) {
/* Now, start initializing */
ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL,
DP_MST_EN);
if (ret < 0)
return ret;
}
}
return nvif_mthd(disp, 0, &args, sizeof(args));
}
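/* Decide whether MST should be enabled for this output.  If MST is
 * already active, only verify the hub still reports DP_MST_EN and tear
 * the topology down if it does not; otherwise probe DP_MSTM_CAP on
 * DPCD 1.2+ sinks and enable MST when both the sink and the caller
 * allow it.  DP_MSTM_CTRL is only rewritten when the desired state
 * actually changes.  Returns the resulting MST state, or a negative
 * error code.
 */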
int
nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
{
struct drm_dp_aux *aux;
int ret;
bool old_state, new_state;
u8 mstm_ctrl;
if (!mstm)
return 0;
mutex_lock(&mstm->mgr.lock);
old_state = mstm->mgr.mst_state;
new_state = old_state;
aux = mstm->mgr.aux;
if (old_state) {
/* Just check that the MST hub is still as we expect it */
ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl);
if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) {
DRM_DEBUG_KMS("Hub gone, disabling MST topology\n");
new_state = false;
}
} else if (dpcd[0] >= 0x12) {
ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]);
if (ret < 0)
goto probe_error;
if (!(dpcd[1] & DP_MST_CAP))
dpcd[0] = 0x11;
else
new_state = allow;
}
if (new_state == old_state) {
mutex_unlock(&mstm->mgr.lock);
return new_state;
}
ret = nv50_mstm_enable(mstm, dpcd[0], new_state);
if (ret)
goto probe_error;
mutex_unlock(&mstm->mgr.lock);
ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state);
if (ret)
return nv50_mstm_enable(mstm, dpcd[0], 0);
return new_state;
probe_error:
mutex_unlock(&mstm->mgr.lock);
return ret;
}
static void
nv50_mstm_fini(struct nv50_mstm *mstm)
{
if (mstm && mstm->mgr.mst_state)
drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
}
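/* Resume-time counterpart to nv50_mstm_fini(): if MST was active before
 * suspend, resynchronise the topology manager with the hub.  Should
 * that fail, MST is disabled and a hotplug event is sent so userspace
 * reprobes the outputs.
 */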
static void
nv50_mstm_init(struct nv50_mstm *mstm, bool runtime)
{
int ret;
if (!mstm || !mstm->mgr.mst_state)
return;
ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime);
if (ret == -1) {
drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
drm_kms_helper_hotplug_event(mstm->mgr.dev);
}
}
static void
nv50_mstm_del(struct nv50_mstm **pmstm)
{
struct nv50_mstm *mstm = *pmstm;
if (mstm) {
drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
kfree(*pmstm);
*pmstm = NULL;
}
}
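/* Allocate and initialise the per-SOR MST topology manager.  The
 * maximum payload count is taken from the number of heads this output
 * can drive (presumably because each active stream requires a head of
 * its own).
 */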
static int
nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
int conn_base_id, struct nv50_mstm **pmstm)
{
const int max_payloads = hweight8(outp->dcb->heads);
struct drm_device *dev = outp->base.base.dev;
struct nv50_mstm *mstm;
int ret;
u8 dpcd;
/* This is a workaround for some monitors not functioning
* correctly in MST mode on initial module load. I think
* some bad interaction with the VBIOS may be responsible.
*
* A good ol' off and on again seems to work here ;)
*/
ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd);
if (ret >= 0 && dpcd >= 0x12)
drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);
if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
return -ENOMEM;
mstm->outp = outp;
mstm->mgr.cbs = &nv50_mstm;
ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
max_payloads, conn_base_id);
if (ret)
return ret;
return 0;
}
/******************************************************************************
* SOR
*****************************************************************************/
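/* Update the core channel's SOR control state.  nv_encoder->ctrl keeps
 * a running mask of the heads owning this SOR in its low bits and the
 * protocol in bits 8 and up; detaching the last head clears the whole
 * value, and the result is pushed through the core's sor->ctrl hook.
 */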
static void
nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
struct nv50_head_atom *asyh, u8 proto, u8 depth)
{
struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
struct nv50_core *core = disp->core;
if (!asyh) {
nv_encoder->ctrl &= ~BIT(head);
if (!(nv_encoder->ctrl & 0x0000000f))
nv_encoder->ctrl = 0;
} else {
nv_encoder->ctrl |= proto << 8;
nv_encoder->ctrl |= BIT(head);
asyh->or.depth = depth;
}
core->func->sor->ctrl(core, nv_encoder->or, nv_encoder->ctrl, asyh);
}
static void
nv50_sor_disable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
nv_encoder->crtc = NULL;
if (nv_crtc) {
struct nvkm_i2c_aux *aux = nv_encoder->aux;
u8 pwr;
if (aux) {
int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1);
if (ret == 0) {
pwr &= ~DP_SET_POWER_MASK;
pwr |= DP_SET_POWER_D3;
nvkm_wraux(aux, DP_SET_POWER, &pwr, 1);
}
}
nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
nv50_audio_disable(encoder, nv_crtc);
nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
nv50_outp_release(nv_encoder);
}
}
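/* Bring up the SOR for the connected output type.  The protocol passed
 * to nv_encoder->update() depends on the DCB entry: TMDS uses 0x1 or
 * 0x2 depending on the link (with 0x4 OR'd in for dual-link), LVDS uses
 * 0x0 plus a VBIOS script selected from panel/EDID data, and DP uses
 * 0x8 or 0x9 with the depth derived from the head's bpc.
 */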
static void
nv50_sor_enable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
struct drm_display_mode *mode = &asyh->state.adjusted_mode;
struct {
struct nv50_disp_mthd_v1 base;
struct nv50_disp_sor_lvds_script_v0 lvds;
} lvds = {
.base.version = 1,
.base.method = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = nv_encoder->dcb->hashm,
};
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector;
struct nvbios *bios = &drm->vbios;
u8 proto = 0xf;
u8 depth = 0x0;
nv_connector = nouveau_encoder_connector_get(nv_encoder);
nv_encoder->crtc = encoder->crtc;
nv50_outp_acquire(nv_encoder);
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_TMDS:
if (nv_encoder->link & 1) {
proto = 0x1;
/* Only enable dual-link if:
* - We need to (i.e. the pixel rate exceeds 165MHz)
* - DCB says we can
* - Not an HDMI monitor, since there's no dual-link
* on HDMI.
*/
if (mode->clock >= 165000 &&
nv_encoder->dcb->duallink_possible &&
!drm_detect_hdmi_monitor(nv_connector->edid))
proto |= 0x4;
} else {
proto = 0x2;
}
nv50_hdmi_enable(&nv_encoder->base.base, mode);
break;
case DCB_OUTPUT_LVDS:
proto = 0x0;
if (bios->fp_no_ddc) {
if (bios->fp.dual_link)
lvds.lvds.script |= 0x0100;
if (bios->fp.if_is_24bit)
lvds.lvds.script |= 0x0200;
} else {
if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
if (((u8 *)nv_connector->edid)[121] == 2)
lvds.lvds.script |= 0x0100;
} else
if (mode->clock >= bios->fp.duallink_transition_clk) {
lvds.lvds.script |= 0x0100;
}
if (lvds.lvds.script & 0x0100) {
if (bios->fp.strapless_is_24bit & 2)
lvds.lvds.script |= 0x0200;
} else {
if (bios->fp.strapless_is_24bit & 1)
lvds.lvds.script |= 0x0200;
}
if (asyh->or.bpc == 8)
lvds.lvds.script |= 0x0200;
}
nvif_mthd(&disp->disp->object, 0, &lvds, sizeof(lvds));
break;
case DCB_OUTPUT_DP:
depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
if (nv_encoder->link & 1)
proto = 0x8;
else
proto = 0x9;
nv50_audio_enable(encoder, mode);
break;
default:
BUG();
break;
}
nv_encoder->update(nv_encoder, nv_crtc->index, asyh, proto, depth);
}
static const struct drm_encoder_helper_funcs
nv50_sor_help = {
.atomic_check = nv50_outp_atomic_check,
.enable = nv50_sor_enable,
.disable = nv50_sor_disable,
};
static void
nv50_sor_destroy(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
nv50_mstm_del(&nv_encoder->dp.mstm);
drm_encoder_cleanup(encoder);
kfree(encoder);
}
static const struct drm_encoder_funcs
nv50_sor_func = {
.destroy = nv50_sor_destroy,
};
static bool nv50_has_mst(struct nouveau_drm *drm)
{
struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
u32 data;
u8 ver, hdr, cnt, len;
data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len);
return data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04);
}
static int
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type, ret;
switch (dcbe->type) {
case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
case DCB_OUTPUT_TMDS:
case DCB_OUTPUT_DP:
default:
type = DRM_MODE_ENCODER_TMDS;
break;
}
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->update = nv50_sor_update;
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
"sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_sor_help);
drm_connector_attach_encoder(connector, encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
struct nv50_disp *disp = nv50_disp(encoder->dev);
struct nvkm_i2c_aux *aux =
nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
if (aux) {
if (disp->disp->object.oclass < GF110_DISP) {
/* HW has no support for address-only
* transactions, so we're required to
* use custom I2C-over-AUX code.
*/
nv_encoder->i2c = &aux->i2c;
} else {
nv_encoder->i2c = &nv_connector->aux.ddc;
}
nv_encoder->aux = aux;
}
if (nv_connector->type != DCB_CONNECTOR_eDP &&
nv50_has_mst(drm)) {
ret = nv50_mstm_new(nv_encoder, &nv_connector->aux,
16, nv_connector->base.base.id,
&nv_encoder->dp.mstm);
if (ret)
return ret;
}
} else {
struct nvkm_i2c_bus *bus =
nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
if (bus)
nv_encoder->i2c = &bus->i2c;
}
return 0;
}
/******************************************************************************
* PIOR
*****************************************************************************/
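/* PIORs drive external encoder chips.  On top of the common output
 * check, the adjusted clock is doubled, presumably to match how the
 * external encoder is clocked on these paths.
 */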
static int
nv50_pior_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
if (ret)
return ret;
crtc_state->adjusted_mode.clock *= 2;
return 0;
}
static void
nv50_pior_disable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
if (nv_encoder->crtc)
core->func->pior->ctrl(core, nv_encoder->or, 0x00000000, NULL);
nv_encoder->crtc = NULL;
nv50_outp_release(nv_encoder);
}
static void
nv50_pior_enable(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
u8 owner = 1 << nv_crtc->index;
u8 proto;
nv50_outp_acquire(nv_encoder);
switch (asyh->or.bpc) {
case 10: asyh->or.depth = 0x6; break;
case 8: asyh->or.depth = 0x5; break;
case 6: asyh->or.depth = 0x2; break;
default: asyh->or.depth = 0x0; break;
}
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_TMDS:
case DCB_OUTPUT_DP:
proto = 0x0;
break;
default:
BUG();
break;
}
core->func->pior->ctrl(core, nv_encoder->or, (proto << 8) | owner, asyh);
nv_encoder->crtc = encoder->crtc;
}
static const struct drm_encoder_helper_funcs
nv50_pior_help = {
.atomic_check = nv50_pior_atomic_check,
.enable = nv50_pior_enable,
.disable = nv50_pior_disable,
};
static void
nv50_pior_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
kfree(encoder);
}
static const struct drm_encoder_funcs
nv50_pior_func = {
.destroy = nv50_pior_destroy,
};
static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus = NULL;
struct nvkm_i2c_aux *aux = NULL;
struct i2c_adapter *ddc;
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
int type;
switch (dcbe->type) {
case DCB_OUTPUT_TMDS:
bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
ddc = bus ? &bus->i2c : NULL;
type = DRM_MODE_ENCODER_TMDS;
break;
case DCB_OUTPUT_DP:
aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
ddc = aux ? &aux->i2c : NULL;
type = DRM_MODE_ENCODER_TMDS;
break;
default:
return -ENODEV;
}
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
nv_encoder->dcb = dcbe;
nv_encoder->i2c = ddc;
nv_encoder->aux = aux;
encoder = to_drm_encoder(nv_encoder);
encoder->possible_crtcs = dcbe->heads;
encoder->possible_clones = 0;
drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
"pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_pior_help);
drm_connector_attach_encoder(connector, encoder);
return 0;
}
/******************************************************************************
* Atomic
*****************************************************************************/
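/* Push a core channel update: prepare any MST managers modified by this
 * commit (payload part 1), kick the core update with the supplied
 * interlock masks, wait for completion on the core notifier, then run
 * MST cleanup (ACT check and payload part 2).
 */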
static void
nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
{
struct nouveau_drm *drm = nouveau_drm(state->dev);
struct nv50_disp *disp = nv50_disp(drm->dev);
struct nv50_core *core = disp->core;
struct nv50_mstm *mstm;
struct drm_encoder *encoder;
NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]);
drm_for_each_encoder(encoder, drm->dev) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
mstm = nouveau_encoder(encoder)->dp.mstm;
if (mstm && mstm->modified)
nv50_mstm_prepare(mstm);
}
}
core->func->ntfy_init(disp->sync, NV50_DISP_CORE_NTFY);
core->func->update(core, interlock, true);
if (core->func->ntfy_wait_done(disp->sync, NV50_DISP_CORE_NTFY,
disp->core->chan.base.device))
NV_ERROR(drm, "core notifier timeout\n");
drm_for_each_encoder(encoder, drm->dev) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
mstm = nouveau_encoder(encoder)->dp.mstm;
if (mstm && mstm->modified)
nv50_mstm_cleanup(mstm);
}
}
}
static void
nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
{
struct drm_plane_state *new_plane_state;
struct drm_plane *plane;
int i;
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw *wndw = nv50_wndw(plane);
if (interlock[wndw->interlock.type] & wndw->interlock.data) {
if (wndw->func->update)
wndw->func->update(wndw, interlock);
}
}
}
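/* Commit tail: disable heads, planes and output paths first (flushing
 * through the core channel when a disable requires it), then enable the
 * updated output paths, heads and planes, flush the update, and finally
 * wait for the windows to arm before delivering any pending vblank
 * events.
 */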
static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_crtc_state *new_crtc_state, *old_crtc_state;
struct drm_crtc *crtc;
struct drm_plane_state *new_plane_state;
struct drm_plane *plane;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
struct nv50_atom *atom = nv50_atom(state);
struct nv50_outp_atom *outp, *outt;
u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {};
int i;
NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
drm_atomic_helper_wait_for_fences(dev, state, false);
drm_atomic_helper_wait_for_dependencies(state);
drm_atomic_helper_update_legacy_modeset_state(dev, state);
if (atom->lock_core)
mutex_lock(&disp->mutex);
/* Disable head(s). */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_head *head = nv50_head(crtc);
NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
asyh->clr.mask, asyh->set.mask);
if (old_crtc_state->active && !new_crtc_state->active) {
pm_runtime_put_noidle(dev->dev);
drm_crtc_vblank_off(crtc);
}
if (asyh->clr.mask) {
nv50_head_flush_clr(head, asyh, atom->flush_disable);
interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
}
}
/* Disable plane(s). */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
asyw->clr.mask, asyw->set.mask);
if (!asyw->clr.mask)
continue;
nv50_wndw_flush_clr(wndw, interlock, atom->flush_disable, asyw);
}
/* Disable output path(s). */
list_for_each_entry(outp, &atom->outp, head) {
const struct drm_encoder_helper_funcs *help;
struct drm_encoder *encoder;
encoder = outp->encoder;
help = encoder->helper_private;
NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
outp->clr.mask, outp->set.mask);
if (outp->clr.mask) {
help->disable(encoder);
interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
if (outp->flush_disable) {
nv50_disp_atomic_commit_wndw(state, interlock);
nv50_disp_atomic_commit_core(state, interlock);
memset(interlock, 0x00, sizeof(interlock));
}
}
}
/* Flush disable. */
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
if (atom->flush_disable) {
nv50_disp_atomic_commit_wndw(state, interlock);
nv50_disp_atomic_commit_core(state, interlock);
memset(interlock, 0x00, sizeof(interlock));
}
}
/* Update output path(s). */
list_for_each_entry_safe(outp, outt, &atom->outp, head) {
const struct drm_encoder_helper_funcs *help;
struct drm_encoder *encoder;
encoder = outp->encoder;
help = encoder->helper_private;
NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
outp->set.mask, outp->clr.mask);
if (outp->set.mask) {
help->enable(encoder);
interlock[NV50_DISP_INTERLOCK_CORE] = 1;
}
list_del(&outp->head);
kfree(outp);
}
/* Update head(s). */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
struct nv50_head *head = nv50_head(crtc);
NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
asyh->set.mask, asyh->clr.mask);
if (asyh->set.mask) {
nv50_head_flush_set(head, asyh);
interlock[NV50_DISP_INTERLOCK_CORE] = 1;
}
if (new_crtc_state->active) {
if (!old_crtc_state->active) {
drm_crtc_vblank_on(crtc);
pm_runtime_get_noresume(dev->dev);
}
if (new_crtc_state->event)
drm_crtc_vblank_get(crtc);
}
}
/* Update plane(s). */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
asyw->set.mask, asyw->clr.mask);
if ( !asyw->set.mask &&
(!asyw->clr.mask || atom->flush_disable))
continue;
nv50_wndw_flush_set(wndw, interlock, asyw);
}
/* Flush update. */
nv50_disp_atomic_commit_wndw(state, interlock);
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
if (interlock[NV50_DISP_INTERLOCK_BASE] ||
interlock[NV50_DISP_INTERLOCK_OVLY] ||
interlock[NV50_DISP_INTERLOCK_WNDW] ||
!atom->state.legacy_cursor_update)
nv50_disp_atomic_commit_core(state, interlock);
else
disp->core->func->update(disp->core, interlock, false);
}
if (atom->lock_core)
mutex_unlock(&disp->mutex);
/* Wait for HW to signal completion. */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
int ret = nv50_wndw_wait_armed(wndw, asyw);
if (ret)
NV_ERROR(drm, "%s: timeout\n", plane->name);
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->event) {
unsigned long flags;
/* Get correct count/ts if racing with vblank irq */
if (new_crtc_state->active)
drm_crtc_accurate_vblank_count(crtc);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
new_crtc_state->event = NULL;
if (new_crtc_state->active)
drm_crtc_vblank_put(crtc);
}
}
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_helper_commit_cleanup_done(state);
drm_atomic_state_put(state);
/* Drop the RPM ref we got from nv50_disp_atomic_commit() */
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
}
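/* Worker that runs the commit tail for nonblocking commits. */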
static void
nv50_disp_atomic_commit_work(struct work_struct *work)
{
struct drm_atomic_state *state =
container_of(work, typeof(*state), commit_work);
nv50_disp_atomic_commit_tail(state);
}
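/*
 * Atomic commit entry point: grab a runtime PM reference, set up and swap
 * the commit state, then run the commit tail directly or queue it on the
 * unbound workqueue for nonblocking commits.
 */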
static int
nv50_disp_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock)
{
struct drm_plane_state *new_plane_state;
struct drm_plane *plane;
int ret, i;
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES)
return ret;
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
goto done;
INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
goto done;
if (!nonblock) {
ret = drm_atomic_helper_wait_for_fences(dev, state, true);
if (ret)
goto err_cleanup;
}
ret = drm_atomic_helper_swap_state(state, true);
if (ret)
goto err_cleanup;
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_wndw *wndw = nv50_wndw(plane);
if (asyw->set.image)
nv50_wndw_ntfy_enable(wndw, asyw);
}
drm_atomic_state_get(state);
/*
* Grab another RPM ref for the commit tail, which will release the
* ref when it's finished
*/
pm_runtime_get_noresume(dev->dev);
if (nonblock)
queue_work(system_unbound_wq, &state->commit_work);
else
nv50_disp_atomic_commit_tail(state);
err_cleanup:
if (ret)
drm_atomic_helper_cleanup_planes(dev, state);
done:
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
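/*
 * Find the per-encoder atom tracked in this atomic state, allocating and
 * linking a new one if the encoder hasn't been seen yet.
 */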
static struct nv50_outp_atom *
nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
{
struct nv50_outp_atom *outp;
list_for_each_entry(outp, &atom->outp, head) {
if (outp->encoder == encoder)
return outp;
}
outp = kzalloc(sizeof(*outp), GFP_KERNEL);
if (!outp)
return ERR_PTR(-ENOMEM);
list_add(&outp->head, &atom->outp);
outp->encoder = encoder;
return outp;
}
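/*
 * If the connector's old CRTC was active and the new state requires a
 * modeset, flag the encoder's control method to be cleared; MST encoders
 * additionally force the disable to be flushed before the rest of the
 * commit.
 */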
static int
nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
struct drm_connector_state *old_connector_state)
{
struct drm_encoder *encoder = old_connector_state->best_encoder;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
struct nv50_outp_atom *outp;
if (!(crtc = old_connector_state->crtc))
return 0;
old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
outp = nv50_disp_outp_atomic_add(atom, encoder);
if (IS_ERR(outp))
return PTR_ERR(outp);
if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
outp->flush_disable = true;
atom->flush_disable = true;
}
outp->clr.ctrl = true;
atom->lock_core = true;
}
return 0;
}
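/*
 * If the connector's new CRTC is active and requires a modeset, flag the
 * encoder's control method to be programmed during the commit.
 */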
static int
nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
struct drm_connector_state *connector_state)
{
struct drm_encoder *encoder = connector_state->best_encoder;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
struct nv50_outp_atom *outp;
if (!(crtc = connector_state->crtc))
return 0;
new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
outp = nv50_disp_outp_atomic_add(atom, encoder);
if (IS_ERR(outp))
return PTR_ERR(outp);
outp->set.ctrl = true;
atom->lock_core = true;
}
return 0;
}
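/*
 * Driver atomic_check: pull affected planes into the state for colour
 * management changes, run the core helpers, then record which output paths
 * need their control methods cleared or set.
 */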
static int
nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
struct nv50_atom *atom = nv50_atom(state);
struct drm_connector_state *old_connector_state, *new_connector_state;
struct drm_connector *connector;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
int ret, i;
/* We need to handle colour management on a per-plane basis. */
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->color_mgmt_changed) {
ret = drm_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
}
}
ret = drm_atomic_helper_check(dev, state);
if (ret)
return ret;
for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
if (ret)
return ret;
ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
if (ret)
return ret;
}
ret = drm_dp_mst_atomic_check(state);
if (ret)
return ret;
return 0;
}
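/* Drop the per-encoder atoms before performing the default state clear. */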
static void
nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
{
struct nv50_atom *atom = nv50_atom(state);
struct nv50_outp_atom *outp, *outt;
list_for_each_entry_safe(outp, outt, &atom->outp, head) {
list_del(&outp->head);
kfree(outp);
}
drm_atomic_state_default_clear(state);
}
static void
nv50_disp_atomic_state_free(struct drm_atomic_state *state)
{
struct nv50_atom *atom = nv50_atom(state);
drm_atomic_state_default_release(&atom->state);
kfree(atom);
}
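/* Allocate the subclassed atomic state, with its list of output-path atoms. */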
static struct drm_atomic_state *
nv50_disp_atomic_state_alloc(struct drm_device *dev)
{
struct nv50_atom *atom;
if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
drm_atomic_state_init(dev, &atom->state) < 0) {
kfree(atom);
return NULL;
}
INIT_LIST_HEAD(&atom->outp);
return &atom->state;
}
static const struct drm_mode_config_funcs
nv50_disp_func = {
.fb_create = nouveau_user_framebuffer_create,
.output_poll_changed = nouveau_fbcon_output_poll_changed,
.atomic_check = nv50_disp_atomic_check,
.atomic_commit = nv50_disp_atomic_commit,
.atomic_state_alloc = nv50_disp_atomic_state_alloc,
.atomic_state_clear = nv50_disp_atomic_state_clear,
.atomic_state_free = nv50_disp_atomic_state_free,
};
/******************************************************************************
* Init
*****************************************************************************/
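/*
 * Display fini (suspend/unload): quiesce every nv50 window plane and the MST
 * state of each non-DPMST encoder.
 */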
static void
nv50_display_fini(struct drm_device *dev, bool suspend)
{
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
struct drm_plane *plane;
drm_for_each_plane(plane, dev) {
struct nv50_wndw *wndw = nv50_wndw(plane);
if (plane->funcs != &nv50_wndw)
continue;
nv50_wndw_fini(wndw);
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
nv_encoder = nouveau_encoder(encoder);
nv50_mstm_fini(nv_encoder->dp.mstm);
}
}
}
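/*
 * Display init (load/resume): initialise the core channel, restart MST on
 * each non-DPMST encoder, then bring every nv50 window plane back up.
 */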
static int
nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
{
struct nv50_core *core = nv50_disp(dev)->core;
struct drm_encoder *encoder;
struct drm_plane *plane;
core->func->init(core);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
struct nouveau_encoder *nv_encoder =
nouveau_encoder(encoder);
nv50_mstm_init(nv_encoder->dp.mstm, runtime);
}
}
drm_for_each_plane(plane, dev) {
struct nv50_wndw *wndw = nv50_wndw(plane);
if (plane->funcs != &nv50_wndw)
continue;
nv50_wndw_init(wndw);
}
return 0;
}
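/* Tear down the audio component, the core channel and the shared sync buffer. */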
static void
nv50_display_destroy(struct drm_device *dev)
{
struct nv50_disp *disp = nv50_disp(dev);
nv50_audio_component_fini(nouveau_drm(dev));
nv50_core_del(&disp->core);
nouveau_bo_unmap(disp->sync);
if (disp->sync)
nouveau_bo_unpin(disp->sync);
nouveau_bo_ref(NULL, &disp->sync);
nouveau_display(dev)->priv = NULL;
kfree(disp);
}
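/*
 * Create the display: allocate the shared notifier/semaphore buffer and the
 * core channel, create one head per hardware CRTC (plus MST encoders where
 * supported), then create encoders and connectors from the VBIOS DCB table.
 */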
int
nv50_display_create(struct drm_device *dev)
{
struct nvif_device *device = &nouveau_drm(dev)->client.device;
struct nouveau_drm *drm = nouveau_drm(dev);
struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *tmp;
struct nv50_disp *disp;
struct dcb_output *dcbe;
int crtcs, ret, i;
bool has_mst = nv50_has_mst(drm);
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
return -ENOMEM;
mutex_init(&disp->mutex);
nouveau_display(dev)->priv = disp;
nouveau_display(dev)->dtor = nv50_display_destroy;
nouveau_display(dev)->init = nv50_display_init;
nouveau_display(dev)->fini = nv50_display_fini;
disp->disp = &nouveau_display(dev)->disp;
dev->mode_config.funcs = &nv50_disp_func;
dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true;
dev->mode_config.normalize_zpos = true;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, NULL, &disp->sync);
if (!ret) {
ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
if (!ret) {
ret = nouveau_bo_map(disp->sync);
if (ret)
nouveau_bo_unpin(disp->sync);
}
if (ret)
nouveau_bo_ref(NULL, &disp->sync);
}
if (ret)
goto out;
/* allocate master evo channel */
ret = nv50_core_new(drm, &disp->core);
if (ret)
goto out;
/* create crtc objects to represent the hw heads */
if (disp->disp->object.oclass >= GV100_DISP)
crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
else
if (disp->disp->object.oclass >= GF110_DISP)
crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
else
crtcs = 0x3;
for (i = 0; i < fls(crtcs); i++) {
struct nv50_head *head;
if (!(crtcs & (1 << i)))
continue;
head = nv50_head_create(dev, i);
if (IS_ERR(head)) {
ret = PTR_ERR(head);
goto out;
}
if (has_mst) {
head->msto = nv50_msto_new(dev, head, i);
if (IS_ERR(head->msto)) {
ret = PTR_ERR(head->msto);
head->msto = NULL;
goto out;
}
/*
* FIXME: This is a hack to workaround the following
* issues:
*
* https://gitlab.gnome.org/GNOME/mutter/issues/759
* https://gitlab.freedesktop.org/xorg/xserver/merge_requests/277
*
* Once these issues are closed, this should be
* removed
*/
head->msto->encoder.possible_crtcs = crtcs;
}
}
/* create encoder/connector objects based on VBIOS DCB table */
for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
connector = nouveau_connector_create(dev, dcbe);
if (IS_ERR(connector))
continue;
if (dcbe->location == DCB_LOC_ON_CHIP) {
switch (dcbe->type) {
case DCB_OUTPUT_TMDS:
case DCB_OUTPUT_LVDS:
case DCB_OUTPUT_DP:
ret = nv50_sor_create(connector, dcbe);
break;
case DCB_OUTPUT_ANALOG:
ret = nv50_dac_create(connector, dcbe);
break;
default:
ret = -ENODEV;
break;
}
} else {
ret = nv50_pior_create(connector, dcbe);
}
if (ret) {
NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
dcbe->location, dcbe->type,
ffs(dcbe->or) - 1, ret);
ret = 0;
}
}
/* cull any connectors we created that don't have an encoder */
list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
if (connector->possible_encoders)
continue;
NV_WARN(drm, "%s has no encoders, removing\n",
connector->name);
connector->funcs->destroy(connector);
}
/* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
dev->vblank_disable_immediate = true;
nv50_audio_component_init(drm);
out:
if (ret)
nv50_display_destroy(dev);
return ret;
}