author    Dave Airlie <airlied@redhat.com>    2019-07-16 04:07:13 +1000
committer Dave Airlie <airlied@redhat.com>    2019-07-16 04:07:13 +1000
commit    3729fe2bc2a01f4cc1aa88be8f64af06084c87d6 (patch)
tree      462102b65a8cec402bc4726eef6946bdd9113111 /drivers/gpu
parent    7e4b4dfc98d54bc79f7ca29c8bc6307ed2948014 (diff)
Revert "Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next"
This reverts commit 031e610a6a21448a63dff7a0416e5e206724caac, reversing changes made to 52d2d44eee8091e740d0d275df1311fb8373c9a9.

The mm changes in there were premature and not fully acked or reviewed by core mm folks. I dropped the ball by merging them via this tree, so let's take them all back out.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c                                    1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c                               169
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig                                  1
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile                                 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h   233
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_lock.c                             100
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_lock.h                              30
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c                             12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c                         4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c                        13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c                           167
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h                           139
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c                         1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c                            23
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c                    472
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c                      245
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h                  15
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c                          8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c                       405
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c                     74
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.h                     16
21 files changed, 445 insertions(+), 1685 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a7fd5a4955c9..58c403eda04e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1739,7 +1739,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
mutex_unlock(&ttm_global_mutex);
- bdev->vm_ops = &ttm_bo_vm_ops;
return 0;
out_no_sys:
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 0c4576cbafcf..6dacff49c1cc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -42,6 +42,8 @@
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
+#define TTM_BO_VM_NUM_PREFAULT 16
+
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
{
@@ -104,30 +106,25 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
+ page_offset;
}
-/**
- * ttm_bo_vm_reserve - Reserve a buffer object in a retryable vm callback
- * @bo: The buffer object
- * @vmf: The fault structure handed to the callback
- *
- * vm callbacks like fault() and *_mkwrite() allow for the mm_sem to be dropped
- * during long waits, and after the wait the callback will be restarted. This
- * is to allow other threads using the same virtual memory space concurrent
- * access to map(), unmap() completely unrelated buffer objects. TTM buffer
- * object reservations sometimes wait for GPU and should therefore be
- * considered long waits. This function reserves the buffer object interruptibly
- * taking this into account. Starvation is avoided by the vm system not
- * allowing too many repeated restarts.
- * This function is intended to be used in customized fault() and _mkwrite()
- * handlers.
- *
- * Return:
- * 0 on success and the bo was reserved.
- * VM_FAULT_RETRY if blocking wait.
- * VM_FAULT_NOPAGE if blocking wait and retrying was not allowed.
- */
-vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
- struct vm_fault *vmf)
+static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
+ struct vm_area_struct *vma = vmf->vma;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
+ vma->vm_private_data;
+ struct ttm_bo_device *bdev = bo->bdev;
+ unsigned long page_offset;
+ unsigned long page_last;
+ unsigned long pfn;
+ struct ttm_tt *ttm = NULL;
+ struct page *page;
+ int err;
+ int i;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ unsigned long address = vmf->address;
+ struct ttm_mem_type_manager *man =
+ &bdev->man[bo->mem.mem_type];
+ struct vm_area_struct cvma;
+
/*
* Work around locking order reversal in fault / nopfn
* between mmap_sem and bo_reserve: Perform a trylock operation
@@ -154,55 +151,14 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
return VM_FAULT_NOPAGE;
}
- return 0;
-}
-EXPORT_SYMBOL(ttm_bo_vm_reserve);
-
-/**
- * ttm_bo_vm_fault_reserved - TTM fault helper
- * @vmf: The struct vm_fault given as argument to the fault callback
- * @prot: The page protection to be used for this memory area.
- * @num_prefault: Maximum number of prefault pages. The caller may want to
- * specify this based on madvice settings and the size of the GPU object
- * backed by the memory.
- *
- * This function inserts one or more page table entries pointing to the
- * memory backing the buffer object, and then returns a return code
- * instructing the caller to retry the page access.
- *
- * Return:
- * VM_FAULT_NOPAGE on success or pending signal
- * VM_FAULT_SIGBUS on unspecified error
- * VM_FAULT_OOM on out-of-memory
- * VM_FAULT_RETRY if retryable wait
- */
-vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
- pgprot_t prot,
- pgoff_t num_prefault)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct vm_area_struct cvma = *vma;
- struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
- vma->vm_private_data;
- struct ttm_bo_device *bdev = bo->bdev;
- unsigned long page_offset;
- unsigned long page_last;
- unsigned long pfn;
- struct ttm_tt *ttm = NULL;
- struct page *page;
- int err;
- pgoff_t i;
- vm_fault_t ret = VM_FAULT_NOPAGE;
- unsigned long address = vmf->address;
- struct ttm_mem_type_manager *man =
- &bdev->man[bo->mem.mem_type];
-
/*
* Refuse to fault imported pages. This should be handled
* (if at all) by redirecting mmap to the exporter.
*/
- if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
- return VM_FAULT_SIGBUS;
+ if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ ret = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
if (bdev->driver->fault_reserve_notify) {
struct dma_fence *moving = dma_fence_get(bo->moving);
@@ -213,9 +169,11 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
break;
case -EBUSY:
case -ERESTARTSYS:
- return VM_FAULT_NOPAGE;
+ ret = VM_FAULT_NOPAGE;
+ goto out_unlock;
default:
- return VM_FAULT_SIGBUS;
+ ret = VM_FAULT_SIGBUS;
+ goto out_unlock;
}
if (bo->moving != moving) {
@@ -231,12 +189,21 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
* move.
*/
ret = ttm_bo_vm_fault_idle(bo, vmf);
- if (unlikely(ret != 0))
- return ret;
+ if (unlikely(ret != 0)) {
+ if (ret == VM_FAULT_RETRY &&
+ !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ /* The BO has already been unreserved. */
+ return ret;
+ }
+
+ goto out_unlock;
+ }
err = ttm_mem_io_lock(man, true);
- if (unlikely(err != 0))
- return VM_FAULT_NOPAGE;
+ if (unlikely(err != 0)) {
+ ret = VM_FAULT_NOPAGE;
+ goto out_unlock;
+ }
err = ttm_mem_io_reserve_vm(bo);
if (unlikely(err != 0)) {
ret = VM_FAULT_SIGBUS;
@@ -253,8 +220,18 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
goto out_io_unlock;
}
- cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
- if (!bo->mem.bus.is_iomem) {
+ /*
+ * Make a local vma copy to modify the page_prot member
+ * and vm_flags if necessary. The vma parameter is protected
+ * by mmap_sem in write mode.
+ */
+ cvma = *vma;
+ cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
+
+ if (bo->mem.bus.is_iomem) {
+ cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+ cvma.vm_page_prot);
+ } else {
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
@@ -263,21 +240,24 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
};
ttm = bo->ttm;
- if (ttm_tt_populate(bo->ttm, &ctx)) {
+ cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+ cvma.vm_page_prot);
+
+ /* Allocate all page at once, most common usage */
+ if (ttm_tt_populate(ttm, &ctx)) {
ret = VM_FAULT_OOM;
goto out_io_unlock;
}
- } else {
- /* Iomem should not be marked encrypted */
- cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
}
/*
* Speculatively prefault a number of pages. Only error on
* first page.
*/
- for (i = 0; i < num_prefault; ++i) {
+ for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
if (bo->mem.bus.is_iomem) {
+ /* Iomem should not be marked encrypted */
+ cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
pfn = ttm_bo_io_mem_pfn(bo, page_offset);
} else {
page = ttm->pages[page_offset];
@@ -315,26 +295,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
ret = VM_FAULT_NOPAGE;
out_io_unlock:
ttm_mem_io_unlock(man);
- return ret;
-}
-EXPORT_SYMBOL(ttm_bo_vm_fault_reserved);
-
-static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- pgprot_t prot;
- struct ttm_buffer_object *bo = vma->vm_private_data;
- vm_fault_t ret;
-
- ret = ttm_bo_vm_reserve(bo, vmf);
- if (ret)
- return ret;
-
- prot = vm_get_page_prot(vma->vm_flags);
- ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
- if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- return ret;
-
+out_unlock:
reservation_object_unlock(bo->resv);
return ret;
}
@@ -434,7 +395,7 @@ static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
return ret;
}
-const struct vm_operations_struct ttm_bo_vm_ops = {
+static const struct vm_operations_struct ttm_bo_vm_ops = {
.fault = ttm_bo_vm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
@@ -487,7 +448,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
if (unlikely(ret != 0))
goto out_unref;
- vma->vm_ops = bdev->vm_ops;
+ vma->vm_ops = &ttm_bo_vm_ops;
/*
* Note: We're transferring the bo reference to
@@ -519,7 +480,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
ttm_bo_get(bo);
- vma->vm_ops = bo->bdev->vm_ops;
+ vma->vm_ops = &ttm_bo_vm_ops;
vma->vm_private_data = bo;
vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND;
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index d5fd81a521f6..6b28a326f8bb 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -8,7 +8,6 @@ config DRM_VMWGFX
select FB_CFB_IMAGEBLIT
select DRM_TTM
select FB
- select AS_DIRTY_HELPERS
# Only needed for the transitional use of drm_crtc_init - can be removed
# again once vmwgfx sets up the primary plane itself.
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index c877a21a0739..8841bd30e1e5 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -8,7 +8,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
- vmwgfx_validation.o vmwgfx_page_dirty.o \
+ vmwgfx_validation.o \
ttm_object.o ttm_lock.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index 61414f105c67..f2bfd3d80598 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -1280,6 +1280,7 @@ svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
return offset;
}
+
static inline u32
svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
surf_size_struct baseLevelSize,
@@ -1374,236 +1375,4 @@ svga3dsurface_is_screen_target_format(SVGA3dSurfaceFormat format)
return svga3dsurface_is_dx_screen_target_format(format);
}
-/**
- * struct svga3dsurface_mip - Mimpmap level information
- * @bytes: Bytes required in the backing store of this mipmap level.
- * @img_stride: Byte stride per image.
- * @row_stride: Byte stride per block row.
- * @size: The size of the mipmap.
- */
-struct svga3dsurface_mip {
- size_t bytes;
- size_t img_stride;
- size_t row_stride;
- struct drm_vmw_size size;
-
-};
-
-/**
- * struct svga3dsurface_cache - Cached surface information
- * @desc: Pointer to the surface descriptor
- * @mip: Array of mipmap level information. Valid size is @num_mip_levels.
- * @mip_chain_bytes: Bytes required in the backing store for the whole chain
- * of mip levels.
- * @sheet_bytes: Bytes required in the backing store for a sheet
- * representing a single sample.
- * @num_mip_levels: Valid size of the @mip array. Number of mipmap levels in
- * a chain.
- * @num_layers: Number of slices in an array texture or number of faces in
- * a cubemap texture.
- */
-struct svga3dsurface_cache {
- const struct svga3d_surface_desc *desc;
- struct svga3dsurface_mip mip[DRM_VMW_MAX_MIP_LEVELS];
- size_t mip_chain_bytes;
- size_t sheet_bytes;
- u32 num_mip_levels;
- u32 num_layers;
-};
-
-/**
- * struct svga3dsurface_loc - Surface location
- * @sub_resource: Surface subresource. Defined as layer * num_mip_levels +
- * mip_level.
- * @x: X coordinate.
- * @y: Y coordinate.
- * @z: Z coordinate.
- */
-struct svga3dsurface_loc {
- u32 sub_resource;
- u32 x, y, z;
-};
-
-/**
- * svga3dsurface_subres - Compute the subresource from layer and mipmap.
- * @cache: Surface layout data.
- * @mip_level: The mipmap level.
- * @layer: The surface layer (face or array slice).
- *
- * Return: The subresource.
- */
-static inline u32 svga3dsurface_subres(const struct svga3dsurface_cache *cache,
- u32 mip_level, u32 layer)
-{
- return cache->num_mip_levels * layer + mip_level;
-}
-
-/**
- * svga3dsurface_setup_cache - Build a surface cache entry
- * @size: The surface base level dimensions.
- * @format: The surface format.
- * @num_mip_levels: Number of mipmap levels.
- * @num_layers: Number of layers.
- * @cache: Pointer to a struct svga3dsurface_cach object to be filled in.
- *
- * Return: Zero on success, -EINVAL on invalid surface layout.
- */
-static inline int svga3dsurface_setup_cache(const struct drm_vmw_size *size,
- SVGA3dSurfaceFormat format,
- u32 num_mip_levels,
- u32 num_layers,
- u32 num_samples,
- struct svga3dsurface_cache *cache)
-{
- const struct svga3d_surface_desc *desc;
- u32 i;
-
- memset(cache, 0, sizeof(*cache));
- cache->desc = desc = svga3dsurface_get_desc(format);
- cache->num_mip_levels = num_mip_levels;
- cache->num_layers = num_layers;
- for (i = 0; i < cache->num_mip_levels; i++) {
- struct svga3dsurface_mip *mip = &cache->mip[i];
-
- mip->size = svga3dsurface_get_mip_size(*size, i);
- mip->bytes = svga3dsurface_get_image_buffer_size
- (desc, &mip->size, 0);
- mip->row_stride =
- __KERNEL_DIV_ROUND_UP(mip->size.width,
- desc->block_size.width) *
- desc->bytes_per_block * num_samples;
- if (!mip->row_stride)
- goto invalid_dim;
-
- mip->img_stride =
- __KERNEL_DIV_ROUND_UP(mip->size.height,
- desc->block_size.height) *
- mip->row_stride;
- if (!mip->img_stride)
- goto invalid_dim;
-
- cache->mip_chain_bytes += mip->bytes;
- }
- cache->sheet_bytes = cache->mip_chain_bytes * num_layers;
- if (!cache->sheet_bytes)
- goto invalid_dim;
-
- return 0;
-
-invalid_dim:
- VMW_DEBUG_USER("Invalid surface layout for dirty tracking.\n");
- return -EINVAL;
-}
-
-/**
- * svga3dsurface_get_loc - Get a surface location from an offset into the
- * backing store
- * @cache: Surface layout data.
- * @loc: Pointer to a struct svga3dsurface_loc to be filled in.
- * @offset: Offset into the surface backing store.
- */
-static inline void
-svga3dsurface_get_loc(const struct svga3dsurface_cache *cache,
- struct svga3dsurface_loc *loc,
- size_t offset)
-{
- const struct svga3dsurface_mip *mip = &cache->mip[0];
- const struct svga3d_surface_desc *desc = cache->desc;
- u32 layer;
- int i;
-
- if (offset >= cache->sheet_bytes)
- offset %= cache->sheet_bytes;
-
- layer = offset / cache->mip_chain_bytes;
- offset -= layer * cache->mip_chain_bytes;
- for (i = 0; i < cache->num_mip_levels; ++i, ++mip) {
- if (mip->bytes > offset)
- break;
- offset -= mip->bytes;
- }
-
- loc->sub_resource = svga3dsurface_subres(cache, i, layer);
- loc->z = offset / mip->img_stride;
- offset -= loc->z * mip->img_stride;
- loc->z *= desc->block_size.depth;
- loc->y = offset / mip->row_stride;
- offset -= loc->y * mip->row_stride;
- loc->y *= desc->block_size.height;
- loc->x = offset / desc->bytes_per_block;
- loc->x *= desc->block_size.width;
-}
-
-/**
- * svga3dsurface_inc_loc - Clamp increment a surface location with one block
- * size
- * in each dimension.
- * @loc: Pointer to a struct svga3dsurface_loc to be incremented.
- *
- * When computing the size of a range as size = end - start, the range does not
- * include the end element. However a location representing the last byte
- * of a touched region in the backing store *is* included in the range.
- * This function modifies such a location to match the end definition
- * given as start + size which is the one used in a SVGA3dBox.
- */
-static inline void
-svga3dsurface_inc_loc(const struct svga3dsurface_cache *cache,
- struct svga3dsurface_loc *loc)
-{
- const struct svga3d_surface_desc *desc = cache->desc;
- u32 mip = loc->sub_resource % cache->num_mip_levels;
- const struct drm_vmw_size *size = &cache->mip[mip].size;
-
- loc->sub_resource++;
- loc->x += desc->block_size.width;
- if (loc->x > size->width)
- loc->x = size->width;
- loc->y += desc->block_size.height;
- if (loc->y > size->height)
- loc->y = size->height;
- loc->z += desc->block_size.depth;
- if (loc->z > size->depth)
- loc->z = size->depth;
-}
-
-/**
- * svga3dsurface_min_loc - The start location in a subresource
- * @cache: Surface layout data.
- * @sub_resource: The subresource.
- * @loc: Pointer to a struct svga3dsurface_loc to be filled in.
- */
-static inline void
-svga3dsurface_min_loc(const struct svga3dsurface_cache *cache,
- u32 sub_resource,
- struct svga3dsurface_loc *loc)
-{
- loc->sub_resource = sub_resource;
- loc->x = loc->y = loc->z = 0;
-}
-
-/**
- * svga3dsurface_min_loc - The end location in a subresource
- * @cache: Surface layout data.
- * @sub_resource: The subresource.
- * @loc: Pointer to a struct svga3dsurface_loc to be filled in.
- *
- * Following the end definition given in svga3dsurface_inc_loc(),
- * Compute the end location of a surface subresource.
- */
-static inline void
-svga3dsurface_max_loc(const struct svga3dsurface_cache *cache,
- u32 sub_resource,
- struct svga3dsurface_loc *loc)
-{
- const struct drm_vmw_size *size;
- u32 mip;
-
- loc->sub_resource = sub_resource + 1;
- mip = sub_resource % cache->num_mip_levels;
- size = &cache->mip[mip].size;
- loc->x = size->width;
- loc->y = size->height;
- loc->z = size->depth;
-}
-
#endif /* _SVGA3D_SURFACEDEFS_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/ttm_lock.c b/drivers/gpu/drm/vmwgfx/ttm_lock.c
index 5971c72e6d10..16b2083cb9d4 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_lock.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_lock.c
@@ -29,6 +29,7 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
+#include <drm/ttm/ttm_module.h>
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
@@ -48,6 +49,8 @@ void ttm_lock_init(struct ttm_lock *lock)
init_waitqueue_head(&lock->queue);
lock->rw = 0;
lock->flags = 0;
+ lock->kill_takers = false;
+ lock->signal = SIGKILL;
}
void ttm_read_unlock(struct ttm_lock *lock)
@@ -63,6 +66,11 @@ static bool __ttm_read_lock(struct ttm_lock *lock)
bool locked = false;
spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
if (lock->rw >= 0 && lock->flags == 0) {
++lock->rw;
locked = true;
@@ -90,6 +98,11 @@ static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
*locked = false;
spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
if (lock->rw >= 0 && lock->flags == 0) {
++lock->rw;
block = false;
@@ -134,6 +147,11 @@ static bool __ttm_write_lock(struct ttm_lock *lock)
bool locked = false;
spin_lock(&lock->lock);
+ if (unlikely(lock->kill_takers)) {
+ send_sig(lock->signal, current, 0);
+ spin_unlock(&lock->lock);
+ return false;
+ }
if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
lock->rw = -1;
lock->flags &= ~TTM_WRITE_LOCK_PENDING;
@@ -164,6 +182,88 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
return ret;
}
+static int __ttm_vt_unlock(struct ttm_lock *lock)
+{
+ int ret = 0;
+
+ spin_lock(&lock->lock);
+ if (unlikely(!(lock->flags & TTM_VT_LOCK)))
+ ret = -EINVAL;
+ lock->flags &= ~TTM_VT_LOCK;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+
+ return ret;
+}
+
+static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
+{
+ struct ttm_base_object *base = *p_base;
+ struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
+ int ret;
+
+ *p_base = NULL;
+ ret = __ttm_vt_unlock(lock);
+ BUG_ON(ret != 0);
+}
+
+static bool __ttm_vt_lock(struct ttm_lock *lock)
+{
+ bool locked = false;
+
+ spin_lock(&lock->lock);
+ if (lock->rw == 0) {
+ lock->flags &= ~TTM_VT_LOCK_PENDING;
+ lock->flags |= TTM_VT_LOCK;
+ locked = true;
+ } else {
+ lock->flags |= TTM_VT_LOCK_PENDING;
+ }
+ spin_unlock(&lock->lock);
+ return locked;
+}
+
+int ttm_vt_lock(struct ttm_lock *lock,
+ bool interruptible,
+ struct ttm_object_file *tfile)
+{
+ int ret = 0;
+
+ if (interruptible) {
+ ret = wait_event_interruptible(lock->queue,
+ __ttm_vt_lock(lock));
+ if (unlikely(ret != 0)) {
+ spin_lock(&lock->lock);
+ lock->flags &= ~TTM_VT_LOCK_PENDING;
+ wake_up_all(&lock->queue);
+ spin_unlock(&lock->lock);
+ return ret;
+ }
+ } else
+ wait_event(lock->queue, __ttm_vt_lock(lock));
+
+ /*
+ * Add a base-object, the destructor of which will
+ * make sure the lock is released if the client dies
+ * while holding it.
+ */
+
+ ret = ttm_base_object_init(tfile, &lock->base, false,
+ ttm_lock_type, &ttm_vt_lock_remove, NULL);
+ if (ret)
+ (void)__ttm_vt_unlock(lock);
+ else
+ lock->vt_holder = tfile;
+
+ return ret;
+}
+
+int ttm_vt_unlock(struct ttm_lock *lock)
+{
+ return ttm_ref_object_base_unref(lock->vt_holder,
+ lock->base.handle, TTM_REF_USAGE);
+}
+
void ttm_suspend_unlock(struct ttm_lock *lock)
{
spin_lock(&lock->lock);
diff --git a/drivers/gpu/drm/vmwgfx/ttm_lock.h b/drivers/gpu/drm/vmwgfx/ttm_lock.h
index 3d454e8b491f..0c3af9836863 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_lock.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_lock.h
@@ -63,6 +63,8 @@
* @lock: Spinlock protecting some lock members.
* @rw: Read-write lock counter. Protected by @lock.
* @flags: Lock state. Protected by @lock.
+ * @kill_takers: Boolean whether to kill takers of the lock.
+ * @signal: Signal to send when kill_takers is true.
*/
struct ttm_lock {
@@ -71,6 +73,9 @@ struct ttm_lock {
spinlock_t lock;
int32_t rw;
uint32_t flags;
+ bool kill_takers;
+ int signal;
+ struct ttm_object_file *vt_holder;
};
@@ -215,4 +220,29 @@ extern void ttm_write_unlock(struct ttm_lock *lock);
*/
extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
+/**
+ * ttm_lock_set_kill
+ *
+ * @lock: Pointer to a struct ttm_lock
+ * @val: Boolean whether to kill processes taking the lock.
+ * @signal: Signal to send to the process taking the lock.
+ *
+ * The kill-when-taking-lock functionality is used to kill processes that keep
+ * on using the TTM functionality when its resources has been taken down, for
+ * example when the X server exits. A typical sequence would look like this:
+ * - X server takes lock in write mode.
+ * - ttm_lock_set_kill() is called with @val set to true.
+ * - As part of X server exit, TTM resources are taken down.
+ * - X server releases the lock on file release.
+ * - Another dri client wants to render, takes the lock and is killed.
+ *
+ */
+static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
+ int signal)
+{
+ lock->kill_takers = val;
+ if (val)
+ lock->signal = signal;
+}
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index e8bc7a7ac031..5d5c2bce01f3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -463,8 +463,6 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
- WARN_ON(vmw_bo->dirty);
- WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
vmw_bo_unmap(vmw_bo);
kfree(vmw_bo);
}
@@ -478,11 +476,8 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
- struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
- WARN_ON(vbo->dirty);
- WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
- vmw_bo_unmap(vbo);
+ vmw_bo_unmap(&vmw_user_bo->vbo);
ttm_prime_object_kfree(vmw_user_bo, prime);
}
@@ -515,9 +510,8 @@ int vmw_bo_init(struct vmw_private *dev_priv,
acc_size = vmw_bo_acc_size(dev_priv, size, user);
memset(vmw_bo, 0, sizeof(*vmw_bo));
- BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
- vmw_bo->base.priority = 3;
- vmw_bo->res_tree = RB_ROOT;
+
+ INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index a56c9d802382..63f111068a44 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -88,8 +88,6 @@ static const struct vmw_res_func vmw_gb_context_func = {
.res_type = vmw_res_context,
.needs_backup = true,
.may_evict = true,
- .prio = 3,
- .dirty_prio = 3,
.type_name = "guest backed contexts",
.backup_placement = &vmw_mob_placement,
.create = vmw_gb_context_create,
@@ -102,8 +100,6 @@ static const struct vmw_res_func vmw_dx_context_func = {
.res_type = vmw_res_dx_context,
.needs_backup = true,
.may_evict = true,
- .prio = 3,
- .dirty_prio = 3,
.type_name = "dx contexts",
.backup_placement = &vmw_mob_placement,
.create = vmw_dx_context_create,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 8c699cb2565b..b4f6e1217c9d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -116,8 +116,6 @@ static const struct vmw_res_func vmw_cotable_func = {
.res_type = vmw_res_cotable,
.needs_backup = true,
.may_evict = true,
- .prio = 3,
- .dirty_prio = 3,
.type_name = "context guest backed object tables",
.backup_placement = &vmw_mob_placement,
.create = vmw_cotable_create,
@@ -309,7 +307,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
struct ttm_buffer_object *bo = val_buf->bo;
struct vmw_fence_obj *fence;
- if (!vmw_resource_mob_attached(res))
+ if (list_empty(&res->mob_head))
return 0;
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
@@ -455,7 +453,6 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
goto out_wait;
}
- vmw_resource_mob_detach(res);
res->backup = buf;
res->backup_size = new_size;
vcotbl->size_read_back = cur_size_read_back;
@@ -470,12 +467,12 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
res->backup = old_buf;
res->backup_size = old_size;
vcotbl->size_read_back = old_size_read_back;
- vmw_resource_mob_attach(res);
goto out_wait;
}
- vmw_resource_mob_attach(res);
/* Let go of the old mob. */
+ list_del(&res->mob_head);
+ list_add_tail(&res->mob_head, &buf->res_list);
vmw_bo_unreference(&old_buf);
res->id = vcotbl->type;
@@ -499,7 +496,7 @@ out_wait:
* is called before bind() in the validation sequence is instead used for two
* things.
* 1) Unscrub the cotable if it is scrubbed and still attached to a backup
- * buffer.
+ * buffer, that is, if @res->mob_head is non-empty.
* 2) Resize the c