author     Christian König <christian.koenig@amd.com>  2020-09-08 14:39:36 +0200
committer  Christian König <christian.koenig@amd.com>  2020-09-11 13:31:23 +0200
commit     81b615798e9459ded773cad7a280a796012f2759 (patch)
tree       49d3844df2fc959a84e57d6144ea9ba0d43306ee /drivers/gpu/drm/nouveau/nouveau_bo.c
parent     7053e0eab473119503f6565b4e398f9a73122481 (diff)
drm/nouveau: stop using TTM placement flags
The TTM placement flags are going to be removed; stop using them here and use the GEM domain flags from the UAPI instead.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/389825/?series=81551&rev=1
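For illustration, the change at a call site swaps only the flag space. The sketch below is hypothetical (ret, cli, size, align and nvbo are assumed caller context, not part of this patch); the nouveau_bo_new() signature matches the hunk further down:

	/* Before: a TTM placement flag crossed the driver interface. */
	ret = nouveau_bo_new(cli, size, align, TTM_PL_FLAG_VRAM,
			     0, 0, NULL, NULL, &nvbo);

	/* After: the NOUVEAU_GEM_DOMAIN_* bits from uapi/drm/nouveau_drm.h
	 * are passed instead; the TTM flags stay private to
	 * set_placement_list().
	 */
	ret = nouveau_bo_new(cli, size, align, NOUVEAU_GEM_DOMAIN_VRAM,
			     0, 0, NULL, NULL, &nvbo);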
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c  91
1 file changed, 57 insertions(+), 34 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a9ad27d86a3c..ddabefaab7cf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -159,8 +159,7 @@ roundup_64(u64 x, u32 y)
}
static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
- int *align, u64 *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct nvif_device *device = &drm->client.device;
@@ -193,7 +192,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
}
struct nouveau_bo *
-nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
+nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
u32 tile_mode, u32 tile_flags)
{
struct nouveau_drm *drm = cli->drm;
@@ -219,7 +218,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
* mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
* into in nouveau_gem_new().
*/
- if (flags & TTM_PL_FLAG_UNCACHED) {
+ if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
/* Determine if we can get a cache-coherent map, forcing
* uncached mapping if we can't.
*/
@@ -259,9 +258,9 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
* Skip page sizes that can't support needed domains.
*/
if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
- (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
+ (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
continue;
- if ((flags & TTM_PL_FLAG_TT) &&
+ if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
@@ -288,13 +287,13 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
}
nvbo->page = vmm->page[pi].shift;
- nouveau_bo_fixup_align(nvbo, flags, align, size);
+ nouveau_bo_fixup_align(nvbo, align, size);
return nvbo;
}
int
-nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
+nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
struct sg_table *sg, struct dma_resv *robj)
{
int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
@@ -304,7 +303,7 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
- nouveau_bo_placement_set(nvbo, flags, 0);
+ nouveau_bo_placement_set(nvbo, domain, 0);
INIT_LIST_HEAD(&nvbo->io_reserve_lru);
ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
@@ -320,19 +319,19 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
- uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+ uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
struct sg_table *sg, struct dma_resv *robj,
struct nouveau_bo **pnvbo)
{
struct nouveau_bo *nvbo;
int ret;
- nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
+ nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
tile_flags);
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
- ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
+ ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
if (ret)
return ret;
@@ -341,27 +340,28 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
}
static void
-set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
+set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain,
+ uint32_t flags)
{
*n = 0;
- if (type & TTM_PL_FLAG_VRAM)
+ if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
- if (type & TTM_PL_FLAG_TT)
+ if (domain & NOUVEAU_GEM_DOMAIN_GART)
pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
- if (type & TTM_PL_FLAG_SYSTEM)
+ if (domain & NOUVEAU_GEM_DOMAIN_CPU)
pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}
static void
-set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
+set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
unsigned i, fpfn, lpfn;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
- nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
+ nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
nvbo->bo.mem.num_pages < vram_pages / 4) {
/*
* Make sure that the color and depth buffers are handled
@@ -388,7 +388,8 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
}
void
-nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
+nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
+ uint32_t busy)
{
struct ttm_placement *pl = &nvbo->placement;
uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
@@ -397,17 +398,17 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
pl->placement = nvbo->placements;
set_placement_list(nvbo->placements, &pl->num_placement,
- type, flags);
+ domain, flags);
pl->busy_placement = nvbo->busy_placements;
set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
- type | busy, flags);
+ domain | busy, flags);
- set_placement_range(nvbo, type);
+ set_placement_range(nvbo, domain);
}
int
-nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
+nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
@@ -419,7 +420,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
return ret;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
- memtype == TTM_PL_FLAG_VRAM && contig) {
+ domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
if (!nvbo->contig) {
nvbo->contig = true;
force = true;
@@ -428,10 +429,22 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
}
if (nvbo->pin_refcnt) {
- if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
+ bool error = evict;
+
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
+ break;
+ case TTM_PL_TT:
+ error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
+ break;
+ default:
+ break;
+ }
+
+ if (error) {
NV_ERROR(drm, "bo %p pinned elsewhere: "
"0x%08x vs 0x%08x\n", bo,
- 1 << bo->mem.mem_type, memtype);
+ bo->mem.mem_type, domain);
ret = -EBUSY;
}
nvbo->pin_refcnt++;
@@ -439,14 +452,14 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
}
if (evict) {
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
ret = nouveau_bo_validate(nvbo, false, false);
if (ret)
goto out;
}
nvbo->pin_refcnt++;
- nouveau_bo_placement_set(nvbo, memtype, 0);
+ nouveau_bo_placement_set(nvbo, domain, 0);
/* drop pin_refcnt temporarily, so we don't trip the assertion
* in nouveau_bo_move() that makes sure we're not trying to
@@ -492,7 +505,16 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
if (ref)
goto out;
- nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
+ break;
+ case TTM_PL_TT:
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
+ break;
+ default:
+ break;
+ }
ret = nouveau_bo_validate(nvbo, false, false);
if (ret == 0) {
@@ -702,11 +724,11 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
- TTM_PL_FLAG_SYSTEM);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+ NOUVEAU_GEM_DOMAIN_CPU);
break;
default:
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
break;
}
@@ -1214,7 +1236,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
return 0;
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
- nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+ 0);
ret = nouveau_bo_validate(nvbo, false, false);
if (ret)
@@ -1238,7 +1261,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
nvbo->busy_placements[i].lpfn = mappable;
}
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
return nouveau_bo_validate(nvbo, false, false);
}
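The same substitution applies at pin time: set_placement_list() now translates NOUVEAU_GEM_DOMAIN_VRAM, NOUVEAU_GEM_DOMAIN_GART and NOUVEAU_GEM_DOMAIN_CPU into TTM_PL_FLAG_VRAM, TTM_PL_FLAG_TT and TTM_PL_FLAG_SYSTEM internally, so callers deal in the UAPI bits only. A minimal usage sketch, assuming nvbo was already allocated with nouveau_bo_new() and ret is a local int:

	/* Pin the buffer into VRAM, requesting contiguous pages; only
	 * the flag argument differs from the pre-patch call.
	 */
	ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true);
	if (ret)
		return ret;

	/* ... use the buffer ... */

	nouveau_bo_unpin(nvbo);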