Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-08-05 16:54:27 +00:00)
drm/nouveau/fb: handle sysmem flush page from common code
- also executes pre-DEVINIT, so early boot is able to DMA sysmem

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
parent 0e44c21708
commit 5728d06419
15 changed files with 80 additions and 83 deletions
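
Before the per-file hunks, here is a condensed sketch of the flow this commit introduces. It paraphrases the code in the hunks below rather than quoting any single file, and is not a compilable unit on its own: the common layer (fb/base.c) now owns the scratch page used for sysmem flushes, and each chipset only supplies a small hook that points its flush register at that page.

    /* Condensed sketch, paraphrased from the hunks below. */

    /* nvkm_fb_ctor(): allocate and DMA-map the flush page once, for any
     * chipset that implements the new sysmem.flush_page_init hook. */
    if (func->sysmem.flush_page_init) {
    	fb->sysmem.flush_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
    	if (!fb->sysmem.flush_page)
    		return -ENOMEM;
    	fb->sysmem.flush_page_addr = dma_map_page(device->dev, fb->sysmem.flush_page,
    						  0, PAGE_SIZE, DMA_BIDIRECTIONAL);
    	if (dma_mapping_error(device->dev, fb->sysmem.flush_page_addr))
    		return -EFAULT;
    }

    /* nvkm_fb_preinit() and nvkm_fb_init(): program the chipset's flush
     * register, so the page is usable pre-DEVINIT as well as after init. */
    if (fb->func->sysmem.flush_page_init)
    	fb->func->sysmem.flush_page_init(fb);

    /* nvkm_fb_dtor(): unmap and free the page again. */
    if (fb->sysmem.flush_page) {
    	dma_unmap_page(subdev->device->dev, fb->sysmem.flush_page_addr,
    		       PAGE_SIZE, DMA_BIDIRECTIONAL);
    	__free_page(fb->sysmem.flush_page);
    }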
@@ -35,6 +35,11 @@ struct nvkm_fb {
 	struct nvkm_blob vpr_scrubber;
 
+	struct {
+		struct page *flush_page;
+		dma_addr_t flush_page_addr;
+	} sysmem;
+
 	struct nvkm_ram *ram;
 
 	struct {

@@ -57,6 +57,15 @@ nvkm_fb_tile_prog(struct nvkm_fb *fb, int region, struct nvkm_fb_tile *tile)
 	}
 }
 
+static void
+nvkm_fb_sysmem_flush_page_init(struct nvkm_device *device)
+{
+	struct nvkm_fb *fb = device->fb;
+
+	if (fb->func->sysmem.flush_page_init)
+		fb->func->sysmem.flush_page_init(fb);
+}
+
 int
 nvkm_fb_bios_memtype(struct nvkm_bios *bios)
 {
@@ -168,6 +177,8 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
 	for (i = 0; i < fb->tile.regions; i++)
 		fb->func->tile.prog(fb, i, &fb->tile.region[i]);
 
+	nvkm_fb_sysmem_flush_page_init(subdev->device);
+
 	if (fb->func->init)
 		fb->func->init(fb);
 
@@ -193,6 +204,13 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
 	return 0;
 }
 
+static int
+nvkm_fb_preinit(struct nvkm_subdev *subdev)
+{
+	nvkm_fb_sysmem_flush_page_init(subdev->device);
+	return 0;
+}
+
 static void *
 nvkm_fb_dtor(struct nvkm_subdev *subdev)
 {
@@ -212,20 +230,28 @@ nvkm_fb_dtor(struct nvkm_subdev *subdev)
 
 	nvkm_blob_dtor(&fb->vpr_scrubber);
 
+	if (fb->sysmem.flush_page) {
+		dma_unmap_page(subdev->device->dev, fb->sysmem.flush_page_addr,
+			       PAGE_SIZE, DMA_BIDIRECTIONAL);
+		__free_page(fb->sysmem.flush_page);
+	}
+
 	if (fb->func->dtor)
 		return fb->func->dtor(fb);
 
 	return fb;
 }
 
 static const struct nvkm_subdev_func
 nvkm_fb = {
 	.dtor = nvkm_fb_dtor,
+	.preinit = nvkm_fb_preinit,
 	.oneinit = nvkm_fb_oneinit,
 	.init = nvkm_fb_init,
 	.intr = nvkm_fb_intr,
 };
 
-void
+int
 nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
 	     enum nvkm_subdev_type type, int inst, struct nvkm_fb *fb)
 {
@@ -234,6 +260,19 @@ nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
 	fb->tile.regions = fb->func->tile.regions;
 	fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", fb->func->default_bigpage);
 	mutex_init(&fb->tags.mutex);
+
+	if (func->sysmem.flush_page_init) {
+		fb->sysmem.flush_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!fb->sysmem.flush_page)
+			return -ENOMEM;
+
+		fb->sysmem.flush_page_addr = dma_map_page(device->dev, fb->sysmem.flush_page,
+							  0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(device->dev, fb->sysmem.flush_page_addr))
+			return -EFAULT;
+	}
+
+	return 0;
 }
 
 int
@@ -242,6 +281,5 @@ nvkm_fb_new_(const struct nvkm_fb_func *func, struct nvkm_device *device,
 {
 	if (!(*pfb = kzalloc(sizeof(**pfb), GFP_KERNEL)))
 		return -ENOMEM;
-	nvkm_fb_ctor(func, device, type, inst, *pfb);
-	return 0;
+	return nvkm_fb_ctor(func, device, type, inst, *pfb);
 }

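A note on the DMA API pairing used in the ctor/dtor hunks above (general kernel API behaviour, not something this commit changes): the address returned by dma_map_page() must be validated with dma_mapping_error() before the device uses it, and dma_unmap_page() must be called with the same size and direction as the map. The sketch below shows only that pairing; "dev", "page" and "addr" are placeholders, not identifiers from this commit.

    /* Minimal sketch of the map/check/unmap pattern, with placeholder names. */
    dma_addr_t addr;

    addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, addr))
    	return -EFAULT;		/* never hand an invalid handle to the device */
    /* ... hardware uses "addr" ... */
    dma_unmap_page(dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
    __free_page(page);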
@@ -26,9 +26,10 @@ static const struct nvkm_fb_func
 ga100_fb = {
 	.dtor = gf100_fb_dtor,
 	.oneinit = gf100_fb_oneinit,
-	.init = gp100_fb_init,
+	.init = gm200_fb_init,
 	.init_page = gv100_fb_init_page,
 	.init_unkn = gp100_fb_init_unkn,
+	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
 	.ram_new = gp100_ram_new,
 	.default_bigpage = 16,
 };

@@ -26,9 +26,10 @@ static const struct nvkm_fb_func
 ga102_fb = {
 	.dtor = gf100_fb_dtor,
 	.oneinit = gf100_fb_oneinit,
-	.init = gp100_fb_init,
+	.init = gm200_fb_init,
 	.init_page = gv100_fb_init_page,
 	.init_unkn = gp100_fb_init_unkn,
+	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
 	.ram_new = ga102_ram_new,
 	.default_bigpage = 16,
 };

@@ -61,14 +61,6 @@ gf100_fb_oneinit(struct nvkm_fb *base)
 	if (ret)
 		return ret;
 
-	fb->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (fb->r100c10_page) {
-		fb->r100c10 = dma_map_page(device->dev, fb->r100c10_page, 0,
-					   PAGE_SIZE, DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(device->dev, fb->r100c10))
-			return -EFAULT;
-	}
-
 	return 0;
 }
@@ -85,15 +77,18 @@ gf100_fb_init_page(struct nvkm_fb *fb)
 	return 0;
 }
 
+void
+gf100_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
+{
+	nvkm_wr32(fb->subdev.device, 0x100c10, fb->sysmem.flush_page_addr >> 8);
+}
+
 void
 gf100_fb_init(struct nvkm_fb *base)
 {
 	struct gf100_fb *fb = gf100_fb(base);
 	struct nvkm_device *device = fb->base.subdev.device;
 
-	if (fb->r100c10_page)
-		nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
-
 	if (base->func->clkgate_pack) {
 		nvkm_therm_clkgate_init(device->therm,
 					base->func->clkgate_pack);
@@ -104,13 +99,6 @@ void *
 gf100_fb_dtor(struct nvkm_fb *base)
 {
 	struct gf100_fb *fb = gf100_fb(base);
-	struct nvkm_device *device = fb->base.subdev.device;
-
-	if (fb->r100c10_page) {
-		dma_unmap_page(device->dev, fb->r100c10, PAGE_SIZE,
-			       DMA_BIDIRECTIONAL);
-		__free_page(fb->r100c10_page);
-	}
 
 	return fb;
 }
@@ -136,6 +124,7 @@ gf100_fb = {
 	.init = gf100_fb_init,
 	.init_page = gf100_fb_init_page,
 	.intr = gf100_fb_intr,
+	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
 	.ram_new = gf100_ram_new,
 	.default_bigpage = 17,
 };

@@ -6,8 +6,6 @@
 
 struct gf100_fb {
 	struct nvkm_fb base;
-	struct page *r100c10_page;
-	dma_addr_t r100c10;
 };
 
 int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
@@ -16,7 +14,5 @@ void *gf100_fb_dtor(struct nvkm_fb *);
 void gf100_fb_init(struct nvkm_fb *);
 void gf100_fb_intr(struct nvkm_fb *);
 
-void gp100_fb_init(struct nvkm_fb *);
-
 void gm200_fb_init(struct nvkm_fb *base);
 #endif

@@ -46,9 +46,6 @@ gm200_fb_init(struct nvkm_fb *base)
 	struct gf100_fb *fb = gf100_fb(base);
 	struct nvkm_device *device = fb->base.subdev.device;
 
-	if (fb->r100c10_page)
-		nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
-
 	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8);
 	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8);
 	nvkm_mask(device, 0x100cc4, 0x00060000,
@@ -62,6 +59,7 @@ gm200_fb = {
 	.init = gm200_fb_init,
 	.init_page = gm200_fb_init_page,
 	.intr = gf100_fb_intr,
+	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
 	.ram_new = gm200_ram_new,
 	.default_bigpage = 0 /* per-instance. */,
 };

@@ -30,6 +30,7 @@ gm20b_fb = {
 	.init = gm200_fb_init,
 	.init_page = gm200_fb_init_page,
 	.intr = gf100_fb_intr,
+	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
 	.default_bigpage = 0 /* per-instance. */,
 };
 

@@ -44,29 +44,15 @@ gp100_fb_init_remapper(struct nvkm_fb *fb)
 	nvkm_mask(device, 0x100c14, 0x00040000, 0x00000000);
 }
 
-void
-gp100_fb_init(struct nvkm_fb *base)
-{
-	struct gf100_fb *fb = gf100_fb(base);
-	struct nvkm_device *device = fb->base.subdev.device;
-
-	if (fb->r100c10_page)
-		nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
-
-	nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8);
-	nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8);
-	nvkm_mask(device, 0x100cc4, 0x00060000,
-		  min(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17);
-}
-
 static const struct nvkm_fb_func
 gp100_fb = {
 	.dtor = gf100_fb_dtor,
 	.oneinit = gf100_fb_oneinit,
-	.init = gp100_fb_init,
+	.init = gm200_fb_init,
 	.init_remapper = gp100_fb_init_remapper,
 	.init_page = gm200_fb_init_page,
 	.init_unkn = gp100_fb_init_unkn,
+	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
 	.ram_new = gp100_ram_new,
 };
 

@@ -55,9 +55,10 @@ static const struct nvkm_fb_func
 gp102_fb = {
 	.dtor = gf100_fb_dtor,
 	.oneinit = gf100_fb_oneinit,
-	.init = gp100_fb_init,
+	.init = gm200_fb_init,
 	.init_remapper = gp100_fb_init_remapper,
 	.init_page = gm200_fb_init_page,
+	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
 	.vpr.scrub_required = gp102_fb_vpr_scrub_required,
 	.vpr.scrub = gp102_fb_vpr_scrub,
 	.ram_new = gp100_ram_new,

@@ -28,6 +28,7 @@ gp10b_fb = {
 	.init = gm200_fb_init,
 	.init_page = gm200_fb_init_page,
 	.intr = gf100_fb_intr,
+	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
 };
 
 int

@@ -32,9 +32,10 @@ static const struct nvkm_fb_func
 gv100_fb = {
 	.dtor = gf100_fb_dtor,
 	.oneinit = gf100_fb_oneinit,
-	.init = gp100_fb_init,
+	.init = gm200_fb_init,
 	.init_page = gv100_fb_init_page,
 	.init_unkn = gp100_fb_init_unkn,
+	.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
 	.vpr.scrub_required = gp102_fb_vpr_scrub_required,
 	.vpr.scrub = gp102_fb_vpr_scrub,
 	.ram_new = gp100_ram_new,

@@ -191,35 +191,12 @@ nv50_fb_intr(struct nvkm_fb *base)
 	nvkm_chan_put(&chan, flags);
 }
 
-static int
-nv50_fb_oneinit(struct nvkm_fb *base)
-{
-	struct nv50_fb *fb = nv50_fb(base);
-	struct nvkm_device *device = fb->base.subdev.device;
-
-	fb->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (fb->r100c08_page) {
-		fb->r100c08 = dma_map_page(device->dev, fb->r100c08_page, 0,
-					   PAGE_SIZE, DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(device->dev, fb->r100c08))
-			return -EFAULT;
-	}
-
-	return 0;
-}
-
 static void
 nv50_fb_init(struct nvkm_fb *base)
 {
 	struct nv50_fb *fb = nv50_fb(base);
 	struct nvkm_device *device = fb->base.subdev.device;
 
-	/* Not a clue what this is exactly. Without pointing it at a
-	 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
-	 * cause IOMMU "read from address 0" errors (rh#561267)
-	 */
-	nvkm_wr32(device, 0x100c08, fb->r100c08 >> 8);
-
 	/* This is needed to get meaningful information from 100c90
 	 * on traps. No idea what these values mean exactly. */
 	nvkm_wr32(device, 0x100c90, fb->func->trap);
@@ -234,17 +211,16 @@ nv50_fb_tags(struct nvkm_fb *base)
 	return 0;
 }
 
+static void
+nv50_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
+{
+	nvkm_wr32(fb->subdev.device, 0x100c08, fb->sysmem.flush_page_addr >> 8);
+}
+
 static void *
 nv50_fb_dtor(struct nvkm_fb *base)
 {
 	struct nv50_fb *fb = nv50_fb(base);
-	struct nvkm_device *device = fb->base.subdev.device;
-
-	if (fb->r100c08_page) {
-		dma_unmap_page(device->dev, fb->r100c08, PAGE_SIZE,
-			       DMA_BIDIRECTIONAL);
-		__free_page(fb->r100c08_page);
-	}
 
 	return fb;
 }
@@ -253,9 +229,9 @@ static const struct nvkm_fb_func
 nv50_fb_ = {
 	.dtor = nv50_fb_dtor,
 	.tags = nv50_fb_tags,
-	.oneinit = nv50_fb_oneinit,
 	.init = nv50_fb_init,
 	.intr = nv50_fb_intr,
+	.sysmem.flush_page_init = nv50_fb_sysmem_flush_page_init,
 	.ram_new = nv50_fb_ram_new,
 };
 

@@ -7,8 +7,6 @@
 struct nv50_fb {
 	const struct nv50_fb_func *func;
 	struct nvkm_fb base;
-	struct page *r100c08_page;
-	dma_addr_t r100c08;
 };
 
 struct nv50_fb_func {

@@ -16,6 +16,10 @@ struct nvkm_fb_func {
 	void (*init_unkn)(struct nvkm_fb *);
 	void (*intr)(struct nvkm_fb *);
 
+	struct nvkm_fb_func_sysmem {
+		void (*flush_page_init)(struct nvkm_fb *);
+	} sysmem;
+
 	struct {
 		bool (*scrub_required)(struct nvkm_fb *);
 		int (*scrub)(struct nvkm_fb *);
@@ -37,8 +41,8 @@ struct nvkm_fb_func {
 	const struct nvkm_therm_clkgate_pack *clkgate_pack;
 };
 
-void nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
+int nvkm_fb_ctor(const struct nvkm_fb_func *, struct nvkm_device *device,
 		  enum nvkm_subdev_type type, int inst, struct nvkm_fb *);
 int nvkm_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *device,
 		 enum nvkm_subdev_type type, int inst, struct nvkm_fb **);
 int nvkm_fb_bios_memtype(struct nvkm_bios *);
@@ -72,6 +76,7 @@ void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
 
 int gf100_fb_oneinit(struct nvkm_fb *);
 int gf100_fb_init_page(struct nvkm_fb *);
+void gf100_fb_sysmem_flush_page_init(struct nvkm_fb *);
 
 int gm200_fb_init_page(struct nvkm_fb *);
 
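
For reference, wiring a chipset into the new interface only takes the two pieces repeated throughout the hunks above: an implementation of the hook and a .sysmem.flush_page_init entry in its nvkm_fb_func table. The sketch below follows that pattern with a hypothetical gxxxx_fb; the gxxxx_* names are illustrative only and the register offset shown is the one gf100-era parts use in this commit.

    /* Hypothetical example; gxxxx_* names are not from this commit. */
    static void
    gxxxx_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
    {
    	/* Point the chipset's sysmem-flush register at the common,
    	 * DMA-mapped scratch page owned by fb/base.c. */
    	nvkm_wr32(fb->subdev.device, 0x100c10, fb->sysmem.flush_page_addr >> 8);
    }

    static const struct nvkm_fb_func
    gxxxx_fb = {
    	/* ...other hooks elided... */
    	.sysmem.flush_page_init = gxxxx_fb_sysmem_flush_page_init,
    };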