diff options
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 70 |
1 file changed, 51 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 18c72c0b478d..d9e57290dc71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -45,7 +45,7 @@ struct amdgpu_bo_list_entry; #define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF /* number of entries in page table */ -#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size) +#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size) /* PTBs (Page Table Blocks) need to be aligned to 32K */ #define AMDGPU_VM_PTB_ALIGN_SIZE 32768 @@ -53,26 +53,44 @@ struct amdgpu_bo_list_entry; /* LOG2 number of continuous pages for the fragment field */ #define AMDGPU_LOG2_PAGES_PER_FRAG 4 -#define AMDGPU_PTE_VALID (1 << 0) -#define AMDGPU_PTE_SYSTEM (1 << 1) -#define AMDGPU_PTE_SNOOPED (1 << 2) +#define AMDGPU_PTE_VALID (1ULL << 0) +#define AMDGPU_PTE_SYSTEM (1ULL << 1) +#define AMDGPU_PTE_SNOOPED (1ULL << 2) /* VI only */ -#define AMDGPU_PTE_EXECUTABLE (1 << 4) +#define AMDGPU_PTE_EXECUTABLE (1ULL << 4) -#define AMDGPU_PTE_READABLE (1 << 5) -#define AMDGPU_PTE_WRITEABLE (1 << 6) +#define AMDGPU_PTE_READABLE (1ULL << 5) +#define AMDGPU_PTE_WRITEABLE (1ULL << 6) -#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7) +#define AMDGPU_PTE_FRAG(x) ((x & 0x1fULL) << 7) + +#define AMDGPU_PTE_PRT (1ULL << 63) + +/* VEGA10 only */ +#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57) +#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL) /* How to programm VM fault handling */ #define AMDGPU_VM_FAULT_STOP_NEVER 0 #define AMDGPU_VM_FAULT_STOP_FIRST 1 #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 +/* max number of VMHUB */ +#define AMDGPU_MAX_VMHUBS 2 +#define AMDGPU_GFXHUB 0 +#define AMDGPU_MMHUB 1 + +/* hardcode that limit for now */ +#define AMDGPU_VA_RESERVED_SIZE (8 << 20) + struct amdgpu_vm_pt { struct amdgpu_bo *bo; uint64_t addr; + + /* array of page tables, one for each directory entry */ + struct amdgpu_vm_pt *entries; + unsigned last_entry_used; }; struct 
amdgpu_vm { @@ -92,14 +110,10 @@ struct amdgpu_vm { struct list_head freed; /* contains the page directory */ - struct amdgpu_bo *page_directory; - unsigned max_pde_used; - struct dma_fence *page_directory_fence; + struct amdgpu_vm_pt root; + struct dma_fence *last_dir_update; uint64_t last_eviction_counter; - /* array of page tables, one for each page directory entry */ - struct amdgpu_vm_pt *page_tables; - /* for id and flush management per ring */ struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS]; @@ -117,7 +131,6 @@ struct amdgpu_vm { struct amdgpu_vm_id { struct list_head list; - struct dma_fence *first; struct amdgpu_sync active; struct dma_fence *last_flush; atomic64_t owner; @@ -147,7 +160,10 @@ struct amdgpu_vm_manager { u64 fence_context; unsigned seqno[AMDGPU_MAX_RINGS]; - uint32_t max_pfn; + uint64_t max_pfn; + uint32_t num_level; + uint64_t vm_size; + uint32_t block_size; /* vram base address for page table entry */ u64 vram_base_offset; /* is vm enabled? */ @@ -159,6 +175,10 @@ struct amdgpu_vm_manager { atomic_t vm_pte_next_ring; /* client id counter */ atomic64_t client_counter; + + /* partial resident texture handling */ + spinlock_t prt_lock; + atomic_t num_prt_users; }; void amdgpu_vm_manager_init(struct amdgpu_device *adev); @@ -173,15 +193,19 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, void *param); void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, struct amdgpu_vm *vm); +int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + uint64_t saddr, uint64_t size); int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync, struct dma_fence *fence, struct amdgpu_job *job); int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job); void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); -int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, - struct amdgpu_vm *vm); +int amdgpu_vm_update_directories(struct 
amdgpu_device *adev, + struct amdgpu_vm *vm); int amdgpu_vm_clear_freed(struct amdgpu_device *adev, - struct amdgpu_vm *vm); + struct amdgpu_vm *vm, + struct dma_fence **fence); int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_sync *sync); int amdgpu_vm_bo_update(struct amdgpu_device *adev, @@ -198,10 +222,18 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, uint64_t addr, uint64_t offset, uint64_t size, uint64_t flags); +int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, + struct amdgpu_bo_va *bo_va, + uint64_t addr, uint64_t offset, + uint64_t size, uint64_t flags); int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, uint64_t addr); +int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + uint64_t saddr, uint64_t size); void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va); +void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size); #endif |