Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, synced 2025-08-05 16:54:27 +00:00
drm/nouveau: improve handling of 64-bit BARs
GPUs now exist with a 64-bit BAR0, which means that the indices of BAR1 and BAR2 (as passed to pci_resource_len(), etc.) are bumped up by one.

Modify nvkm_device.resource_addr/size() to take an enum instead of an integer bar index, and take IORESOURCE_MEM_64 into account when translating to the "raw" bar id.

[airlied: fixup ERR_PTR]

Signed-off-by: Ben Skeggs <bskeggs@nvidia.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Reviewed-by: Timur Tabi <ttabi@nvidia.com>
Tested-by: Timur Tabi <ttabi@nvidia.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
This commit is contained in: parent b1ca384772, commit 76b8f81a5b
36 changed files with 111 additions and 75 deletions
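The core of the change: a 64-bit memory BAR occupies two consecutive PCI BAR registers, so when BAR0 is 64-bit the framebuffer and instance BARs move up one raw index each. Below is a minimal sketch of the logical-to-raw translation the patch adds in its PCI backend (the same walk as nvkm_device_pci_resource_idx in the diff; the free-standing helper name here is illustrative):

#include <linux/pci.h>

/* BAR identifiers introduced by this patch (see the header diff below). */
enum nvkm_bar_id {
	NVKM_BAR_INVALID = 0,
	NVKM_BAR0_PRI,	/* register (PRI) aperture */
	NVKM_BAR1_FB,	/* framebuffer aperture */
	NVKM_BAR2_INST,	/* instance memory aperture */
};

/* Translate a logical BAR id into a raw PCI resource index by stepping
 * past any 64-bit memory BAR, which consumes two BAR slots.
 */
static int bar_id_to_raw_index(struct pci_dev *pdev, enum nvkm_bar_id bar)
{
	int idx = 0;

	if (bar == NVKM_BAR0_PRI)
		return idx;

	idx += (pci_resource_flags(pdev, idx) & IORESOURCE_MEM_64) ? 2 : 1;
	if (bar == NVKM_BAR1_FB)
		return idx;

	idx += (pci_resource_flags(pdev, idx) & IORESOURCE_MEM_64) ? 2 : 1;
	if (bar == NVKM_BAR2_INST)
		return idx;

	return -EINVAL;
}

On boards where BAR1 is itself a 64-bit BAR (which the old hardcoded raw index 3 for the instance BAR implies), this walk yields 0/1/3, matching the constants the diff removes; with a 64-bit BAR0 it yields 0/2/4 instead.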
@@ -77,6 +77,13 @@ struct nvkm_device {
 struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int type, int inst);
 struct nvkm_engine *nvkm_device_engine(struct nvkm_device *, int type, int inst);
 
+enum nvkm_bar_id {
+	NVKM_BAR_INVALID = 0,
+	NVKM_BAR0_PRI,
+	NVKM_BAR1_FB,
+	NVKM_BAR2_INST,
+};
+
 struct nvkm_device_func {
 	struct nvkm_device_pci *(*pci)(struct nvkm_device *);
 	struct nvkm_device_tegra *(*tegra)(struct nvkm_device *);

@@ -85,8 +92,8 @@ struct nvkm_device_func {
 	int (*init)(struct nvkm_device *);
 	void (*fini)(struct nvkm_device *, bool suspend);
 	int (*irq)(struct nvkm_device *);
-	resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
-	resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
+	resource_size_t (*resource_addr)(struct nvkm_device *, enum nvkm_bar_id);
+	resource_size_t (*resource_size)(struct nvkm_device *, enum nvkm_bar_id);
 	bool cpu_coherent;
 };
 

@@ -315,7 +315,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
 		break;
 	}
 	case NOUVEAU_GETPARAM_VRAM_BAR_SIZE:
-		getparam->value = nvkm_device->func->resource_size(nvkm_device, 1);
+		getparam->value = nvkm_device->func->resource_size(nvkm_device, NVKM_BAR1_FB);
 		break;
 	case NOUVEAU_GETPARAM_VRAM_USED: {
 		struct ttm_resource_manager *vram_mgr = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);

@@ -1204,7 +1204,7 @@ retry:
 		fallthrough;	/* tiled memory */
 	case TTM_PL_VRAM:
 		reg->bus.offset = (reg->start << PAGE_SHIFT) +
-				  device->func->resource_addr(device, 1);
+				  device->func->resource_addr(device, NVKM_BAR1_FB);
 		reg->bus.is_iomem = true;
 
 		/* Some BARs do not support being ioremapped WC */

@@ -1295,7 +1295,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nvkm_device *device = nvxx_device(drm);
-	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
+	u32 mappable = device->func->resource_size(device, NVKM_BAR1_FB) >> PAGE_SHIFT;
 	int i, ret;
 
 	/* as long as the bo isn't in vram, and isn't tiled, we've got

@@ -209,13 +209,15 @@ nouveau_channel_prep(struct nouveau_cli *cli,
 	} else
 	if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) {
 		if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
+			struct nvkm_device *nvkm_device = nvxx_device(drm);
+
 			/* nv04 vram pushbuf hack, retarget to its location in
 			 * the framebuffer bar rather than direct vram access..
 			 * nfi why this exists, it came from the -nv ddx.
 			 */
 			args.target = NV_DMA_V0_TARGET_PCI;
 			args.access = NV_DMA_V0_ACCESS_RDWR;
-			args.start = nvxx_device(drm)->func->resource_addr(nvxx_device(drm), 1);
+			args.start = nvkm_device->func->resource_addr(nvkm_device, NVKM_BAR1_FB);
 			args.limit = args.start + device->info.ram_user - 1;
 		} else {
 			args.target = NV_DMA_V0_TARGET_VRAM;

@@ -312,8 +312,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 	/* VRAM init */
 	drm->gem.vram_available = drm->client.device.info.ram_user;
 
-	arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
-				   device->func->resource_size(device, 1));
+	arch_io_reserve_memtype_wc(device->func->resource_addr(device, NVKM_BAR1_FB),
+				   device->func->resource_size(device, NVKM_BAR1_FB));
 
 	ret = nouveau_ttm_init_vram(drm);
 	if (ret) {

@@ -321,8 +321,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 		return ret;
 	}
 
-	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
-					 device->func->resource_size(device, 1));
+	drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, NVKM_BAR1_FB),
+					 device->func->resource_size(device, NVKM_BAR1_FB));
 
 	/* GART init */
 	if (!drm->agp.bridge) {

@@ -357,7 +357,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
 
 	arch_phys_wc_del(drm->ttm.mtrr);
 	drm->ttm.mtrr = 0;
-	arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
-				device->func->resource_size(device, 1));
+	arch_io_free_memtype_wc(device->func->resource_addr(device, NVKM_BAR1_FB),
+				device->func->resource_size(device, NVKM_BAR1_FB));
 
 }

@@ -3027,8 +3027,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
 	device->debug = nvkm_dbgopt(device->dbgopt, "device");
 	INIT_LIST_HEAD(&device->subdev);
 
-	mmio_base = device->func->resource_addr(device, 0);
-	mmio_size = device->func->resource_size(device, 0);
+	mmio_base = device->func->resource_addr(device, NVKM_BAR0_PRI);
+	mmio_size = device->func->resource_size(device, NVKM_BAR0_PRI);
 
 	device->pri = ioremap(mmio_base, mmio_size);
 	if (device->pri == NULL) {

@@ -1560,18 +1560,42 @@ nvkm_device_pci(struct nvkm_device *device)
 	return container_of(device, struct nvkm_device_pci, device);
 }
 
-static resource_size_t
-nvkm_device_pci_resource_addr(struct nvkm_device *device, unsigned bar)
+static int
+nvkm_device_pci_resource_idx(struct nvkm_device_pci *pdev, enum nvkm_bar_id bar)
 {
-	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
-	return pci_resource_start(pdev->pdev, bar);
+	int idx = 0;
+
+	if (bar == NVKM_BAR0_PRI)
+		return idx;
+
+	idx += (pci_resource_flags(pdev->pdev, idx) & IORESOURCE_MEM_64) ? 2 : 1;
+	if (bar == NVKM_BAR1_FB)
+		return idx;
+
+	idx += (pci_resource_flags(pdev->pdev, idx) & IORESOURCE_MEM_64) ? 2 : 1;
+	if (bar == NVKM_BAR2_INST)
+		return idx;
+
+	WARN_ON(1);
+	return -1;
 }
 
 static resource_size_t
-nvkm_device_pci_resource_size(struct nvkm_device *device, unsigned bar)
+nvkm_device_pci_resource_addr(struct nvkm_device *device, enum nvkm_bar_id bar)
 {
 	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
-	return pci_resource_len(pdev->pdev, bar);
+	int idx = nvkm_device_pci_resource_idx(pdev, bar);
+
+	return idx >= 0 ? pci_resource_start(pdev->pdev, idx) : 0;
+}
+
+static resource_size_t
+nvkm_device_pci_resource_size(struct nvkm_device *device, enum nvkm_bar_id bar)
+{
+	struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+	int idx = nvkm_device_pci_resource_idx(pdev, bar);
+
+	return idx >= 0 ? pci_resource_len(pdev->pdev, idx) : 0;
 }
 
 static int

@@ -186,21 +186,31 @@ nvkm_device_tegra(struct nvkm_device *device)
 }
 
 static struct resource *
-nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource(struct nvkm_device *device, enum nvkm_bar_id bar)
 {
 	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
-	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
+	int idx;
+
+	switch (bar) {
+	case NVKM_BAR0_PRI: idx = 0; break;
+	case NVKM_BAR1_FB : idx = 1; break;
+	default:
+		WARN_ON(1);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return platform_get_resource(tdev->pdev, IORESOURCE_MEM, idx);
 }
 
 static resource_size_t
-nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource_addr(struct nvkm_device *device, enum nvkm_bar_id bar)
 {
 	struct resource *res = nvkm_device_tegra_resource(device, bar);
 	return res ? res->start : 0;
 }
 
 static resource_size_t
-nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
+nvkm_device_tegra_resource_size(struct nvkm_device *device, enum nvkm_bar_id bar)
 {
 	struct resource *res = nvkm_device_tegra_resource(device, bar);
 	return res ? resource_size(res) : 0;

@@ -209,8 +209,8 @@ nvkm_udevice_map(struct nvkm_object *object, void *argv, u32 argc,
 	struct nvkm_udevice *udev = nvkm_udevice(object);
 	struct nvkm_device *device = udev->device;
 	*type = NVKM_OBJECT_MAP_IO;
-	*addr = device->func->resource_addr(device, 0);
-	*size = device->func->resource_size(device, 0);
+	*addr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+	*size = device->func->resource_size(device, NVKM_BAR0_PRI);
 	return 0;
 }
 

@@ -49,7 +49,7 @@ nvkm_disp_chan_map(struct nvkm_object *object, void *argv, u32 argc,
 {
 	struct nvkm_disp_chan *chan = nvkm_disp_chan(object);
 	struct nvkm_device *device = chan->disp->engine.subdev.device;
-	const u64 base = device->func->resource_addr(device, 0);
+	const u64 base = device->func->resource_addr(device, NVKM_BAR0_PRI);
 
 	*type = NVKM_OBJECT_MAP_IO;
 	*addr = base + chan->func->user(chan, size);

@@ -805,7 +805,7 @@ gv100_disp_caps_map(struct nvkm_object *object, void *argv, u32 argc,
 	struct gv100_disp_caps *caps = gv100_disp_caps(object);
 	struct nvkm_device *device = caps->disp->engine.subdev.device;
 	*type = NVKM_OBJECT_MAP_IO;
-	*addr = 0x640000 + device->func->resource_addr(device, 0);
+	*addr = 0x640000 + device->func->resource_addr(device, NVKM_BAR0_PRI);
 	*size = 0x1000;
 	return 0;
 }

@@ -303,7 +303,7 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine)
 	}
 
 	/* Allocate USERD + BAR1 polling area. */
-	if (fifo->func->chan.func->userd->bar == 1) {
+	if (fifo->func->chan.func->userd->bar == NVKM_BAR1_FB) {
 		struct nvkm_vmm *bar1 = nvkm_bar_bar1_vmm(device);
 
 		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, fifo->chid->nr *

@@ -355,14 +355,14 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
 	/* Validate arguments against class requirements. */
 	if ((runq && runq >= runl->func->runqs) ||
 	    (!func->inst->vmm != !vmm) ||
-	    ((func->userd->bar < 0) == !userd) ||
+	    (!func->userd->bar == !userd) ||
 	    (!func->ramfc->ctxdma != !dmaobj) ||
 	    ((func->ramfc->devm < devm) && devm != BIT(0)) ||
 	    (!func->ramfc->priv && priv)) {
 		RUNL_DEBUG(runl, "args runq:%d:%d vmm:%d:%p userd:%d:%p "
 				 "push:%d:%p devm:%08x:%08x priv:%d:%d",
 			   runl->func->runqs, runq, func->inst->vmm, vmm,
-			   func->userd->bar < 0, userd, func->ramfc->ctxdma, dmaobj,
+			   func->userd->bar, userd, func->ramfc->ctxdma, dmaobj,
 			   func->ramfc->devm, devm, func->ramfc->priv, priv);
 		return -EINVAL;
 	}

@@ -439,7 +439,7 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
 	/* Allocate channel ID. */
 	chan->id = nvkm_chid_get(runl->chid, chan);
 	if (chan->id >= 0) {
-		if (func->userd->bar < 0) {
+		if (!func->userd->bar) {
 			if (ouserd + chan->func->userd->size >=
 			    nvkm_memory_size(userd)) {
 				RUNL_DEBUG(runl, "ouserd %llx", ouserd);

@@ -24,7 +24,7 @@ struct nvkm_chan_func {
 	} *inst;
 
 	const struct nvkm_chan_func_userd {
-		int bar;
+		enum nvkm_bar_id bar;
 		u32 base;
 		u32 size;
 		void (*clear)(struct nvkm_chan *);

@@ -133,7 +133,7 @@ gf100_chan_userd_clear(struct nvkm_chan *chan)
 
 static const struct nvkm_chan_func_userd
 gf100_chan_userd = {
-	.bar = 1,
+	.bar = NVKM_BAR1_FB,
 	.size = 0x1000,
 	.clear = gf100_chan_userd_clear,
 };

@@ -113,7 +113,7 @@ gk104_chan_ramfc = {
 
 const struct nvkm_chan_func_userd
 gk104_chan_userd = {
-	.bar = 1,
+	.bar = NVKM_BAR1_FB,
 	.size = 0x200,
 	.clear = gf100_chan_userd_clear,
 };

@@ -745,7 +745,7 @@ gk104_fifo_init(struct nvkm_fifo *fifo)
 {
 	struct nvkm_device *device = fifo->engine.subdev.device;
 
-	if (fifo->func->chan.func->userd->bar == 1)
+	if (fifo->func->chan.func->userd->bar == NVKM_BAR1_FB)
 		nvkm_wr32(device, 0x002254, 0x10000000 | fifo->userd.bar1->addr >> 12);
 
 	nvkm_wr32(device, 0x002100, 0xffffffff);

@@ -70,7 +70,6 @@ gv100_chan_ramfc = {
 
 const struct nvkm_chan_func_userd
 gv100_chan_userd = {
-	.bar = -1,
 	.size = 0x200,
 	.clear = gf100_chan_userd_clear,
 };

@@ -154,7 +154,7 @@ nv04_chan_ramfc = {
 
 const struct nvkm_chan_func_userd
 nv04_chan_userd = {
-	.bar = 0,
+	.bar = NVKM_BAR0_PRI,
 	.base = 0x800000,
 	.size = 0x010000,
 };

@@ -93,7 +93,7 @@ nv40_chan_ramfc = {
 
 static const struct nvkm_chan_func_userd
 nv40_chan_userd = {
-	.bar = 0,
+	.bar = NVKM_BAR0_PRI,
 	.base = 0xc00000,
 	.size = 0x001000,
 };

@@ -124,7 +124,7 @@ nv50_chan_ramfc = {
 
 const struct nvkm_chan_func_userd
 nv50_chan_userd = {
-	.bar = 0,
+	.bar = NVKM_BAR0_PRI,
 	.base = 0xc00000,
 	.size = 0x002000,
 };

@@ -258,7 +258,7 @@ nvkm_uchan_map(struct nvkm_object *object, void *argv, u32 argc,
 	struct nvkm_chan *chan = nvkm_uchan(object)->chan;
 	struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
 
-	if (chan->func->userd->bar < 0)
+	if (!chan->func->userd->bar)
 		return -ENOSYS;
 
 	*type = NVKM_OBJECT_MAP_IO;

@@ -297,7 +297,7 @@ nv20_gr_init(struct nvkm_gr *base)
 	nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);
 
 	/* begin RAM config */
-	vramsz = device->func->resource_size(device, 1) - 1;
+	vramsz = device->func->resource_size(device, NVKM_BAR1_FB) - 1;
 	nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
 	nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
 	nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);

@@ -386,7 +386,7 @@ nv40_gr_init(struct nvkm_gr *base)
 	}
 
 	/* begin RAM config */
-	vramsz = device->func->resource_size(device, 1) - 1;
+	vramsz = device->func->resource_size(device, NVKM_BAR1_FB) - 1;
 	switch (device->chipset) {
 	case 0x40:
 		nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));

@@ -82,7 +82,7 @@ gf100_bar_bar2_init(struct nvkm_bar *base)
 
 static int
 gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
-		      struct lock_class_key *key, int bar_nr)
+		      struct lock_class_key *key, enum nvkm_bar_id bar_id)
 {
 	struct nvkm_device *device = bar->base.subdev.device;
 	resource_size_t bar_len;

@@ -93,14 +93,14 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
 	if (ret)
 		return ret;
 
-	bar_len = device->func->resource_size(device, bar_nr);
+	bar_len = device->func->resource_size(device, bar_id);
 	if (!bar_len)
 		return -ENOMEM;
-	if (bar_nr == 3 && bar->bar2_halve)
+	if (bar_id == NVKM_BAR2_INST && bar->bar2_halve)
 		bar_len >>= 1;
 
 	ret = nvkm_vmm_new(device, 0, bar_len, NULL, 0, key,
-			   (bar_nr == 3) ? "bar2" : "bar1", &bar_vm->vmm);
+			   (bar_id == NVKM_BAR2_INST) ? "bar2" : "bar1", &bar_vm->vmm);
 	if (ret)
 		return ret;
 

@@ -110,7 +110,7 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
 	/*
 	 * Bootstrap page table lookup.
 	 */
-	if (bar_nr == 3) {
+	if (bar_id == NVKM_BAR2_INST) {
 		ret = nvkm_vmm_boot(bar_vm->vmm);
 		if (ret)
 			return ret;

@@ -129,7 +129,7 @@ gf100_bar_oneinit(struct nvkm_bar *base)
 
 	/* BAR2 */
 	if (bar->base.func->bar2.init) {
-		ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, 3);
+		ret = gf100_bar_oneinit_bar(bar, &bar->bar[0], &bar2_lock, NVKM_BAR2_INST);
 		if (ret)
 			return ret;
 

@@ -138,7 +138,7 @@ gf100_bar_oneinit(struct nvkm_bar *base)
 	}
 
 	/* BAR1 */
-	ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, 1);
+	ret = gf100_bar_oneinit_bar(bar, &bar->bar[1], &bar1_lock, NVKM_BAR1_FB);
 	if (ret)
 		return ret;
 

@@ -127,7 +127,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
 
 	/* BAR2 */
 	start = 0x0100000000ULL;
-	size = device->func->resource_size(device, 3);
+	size = device->func->resource_size(device, NVKM_BAR2_INST);
 	if (!size)
 		return -ENOMEM;
 	limit = start + size;

@@ -167,7 +167,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
 
 	/* BAR1 */
 	start = 0x0000000000ULL;
-	size = device->func->resource_size(device, 1);
+	size = device->func->resource_size(device, NVKM_BAR1_FB);
 	if (!size)
 		return -ENOMEM;
 	limit = start + size;

@@ -47,8 +47,8 @@
 static inline struct io_mapping *
 fbmem_init(struct nvkm_device *dev)
 {
-	return io_mapping_create_wc(dev->func->resource_addr(dev, 1),
-				    dev->func->resource_size(dev, 1));
+	return io_mapping_create_wc(dev->func->resource_addr(dev, NVKM_BAR1_FB),
+				    dev->func->resource_size(dev, NVKM_BAR1_FB));
 }
 
 static inline void

@@ -50,7 +50,7 @@ nvkm_ufault_map(struct nvkm_object *object, void *argv, u32 argc,
 	struct nvkm_fault_buffer *buffer = nvkm_fault_buffer(object);
 	struct nvkm_device *device = buffer->fault->subdev.device;
 	*type = NVKM_OBJECT_MAP_IO;
-	*addr = device->func->resource_addr(device, 3) + buffer->addr;
+	*addr = device->func->resource_addr(device, NVKM_BAR2_INST) + buffer->addr;
 	*size = nvkm_memory_size(buffer->mem);
 	return 0;
 }

@@ -191,7 +191,7 @@ r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
 	}
 	*pbar = bar;
 
-	bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
+	bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, NVKM_BAR2_INST), PAGE_SIZE);
 	if (!bar->flushBAR2PhysMode)
 		return -ENOMEM;
 

@@ -901,9 +901,9 @@ r535_gsp_set_system_info(struct nvkm_gsp *gsp)
 	if (IS_ERR(info))
 		return PTR_ERR(info);
 
-	info->gpuPhysAddr = device->func->resource_addr(device, 0);
-	info->gpuPhysFbAddr = device->func->resource_addr(device, 1);
-	info->gpuPhysInstAddr = device->func->resource_addr(device, 3);
+	info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+	info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB);
+	info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST);
 	info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev);
 	info->maxUserVa = TASK_SIZE;
 	info->pciConfigMirrorBase = device->pci->func->cfg.addr;

@@ -153,9 +153,9 @@ r570_gsp_set_system_info(struct nvkm_gsp *gsp)
 	if (IS_ERR(info))
 		return PTR_ERR(info);
 
-	info->gpuPhysAddr = device->func->resource_addr(device, 0);
-	info->gpuPhysFbAddr = device->func->resource_addr(device, 1);
-	info->gpuPhysInstAddr = device->func->resource_addr(device, 3);
+	info->gpuPhysAddr = device->func->resource_addr(device, NVKM_BAR0_PRI);
+	info->gpuPhysFbAddr = device->func->resource_addr(device, NVKM_BAR1_FB);
+	info->gpuPhysInstAddr = device->func->resource_addr(device, NVKM_BAR2_INST);
 	info->nvDomainBusDeviceFunc = pci_dev_id(pdev);
 	info->maxUserVa = TASK_SIZE;
 	info->pciConfigMirrorBase = device->pci->func->cfg.addr;

@@ -239,7 +239,6 @@ nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int ins
 		 struct nvkm_instmem **pimem)
 {
 	struct nv40_instmem *imem;
-	int bar;
 
 	if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
 		return -ENOMEM;

@@ -247,13 +246,8 @@ nv40_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int ins
 	*pimem = &imem->base;
 
 	/* map bar */
-	if (device->func->resource_size(device, 2))
-		bar = 2;
-	else
-		bar = 3;
-
-	imem->iomem = ioremap_wc(device->func->resource_addr(device, bar),
-				 device->func->resource_size(device, bar));
+	imem->iomem = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST),
+				 device->func->resource_size(device, NVKM_BAR2_INST));
 	if (!imem->iomem) {
 		nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
 		return -EFAULT;

@@ -172,7 +172,7 @@ nv50_instobj_kmap(struct nv50_instobj *iobj, struct nvkm_vmm *vmm)
 
 	/* Make the mapping visible to the host. */
 	iobj->bar = bar;
-	iobj->map = ioremap_wc(device->func->resource_addr(device, 3) +
+	iobj->map = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST) +
 			       (u32)iobj->bar->addr, size);
 	if (!iobj->map) {
 		nvkm_warn(subdev, "PRAMIN ioremap failed\n");

@@ -60,7 +60,7 @@ gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
 	if (ret)
 		return ret;
 
-	*paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+	*paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + (*pvma)->addr;
 	*psize = (*pvma)->size;
 	return 0;
 }

@@ -41,7 +41,7 @@ nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
 	if ((ret = nvif_unvers(ret, &argv, &argc, args->vn)))
 		return ret;
 
-	*paddr = device->func->resource_addr(device, 1) + addr;
+	*paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + addr;
 	*psize = nvkm_memory_size(memory);
 	*pvma = ERR_PTR(-ENODEV);
 	return 0;

@@ -57,7 +57,7 @@ nv50_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv,
 	if (ret)
 		return ret;
 
-	*paddr = device->func->resource_addr(device, 1) + (*pvma)->addr;
+	*paddr = device->func->resource_addr(device, NVKM_BAR1_FB) + (*pvma)->addr;
 	*psize = (*pvma)->size;
 	return nvkm_memory_map(memory, 0, bar, *pvma, &uvmm, sizeof(uvmm));
 }

@@ -36,7 +36,7 @@ nvkm_uvfn_map(struct nvkm_object *object, void *argv, u32 argc,
 	struct nvkm_vfn *vfn = nvkm_uvfn(object)->vfn;
 	struct nvkm_device *device = vfn->subdev.device;
 
-	*addr = device->func->resource_addr(device, 0) + vfn->addr.user;
+	*addr = device->func->resource_addr(device, NVKM_BAR0_PRI) + vfn->addr.user;
 	*size = vfn->func->user.size;
 	*type = NVKM_OBJECT_MAP_IO;
 	return 0;