author    Matthew Auld <matthew.auld@intel.com>    2018-10-29 20:37:34 +0000
committer Chris Wilson <chris@chris-wilson.co.uk>  2018-10-31 08:48:46 +0000
commit    79c03caac2ff362304935aafc1be0111d36d1ce5 (patch)
tree      aab16c1f7ea649ffe573b68def5ca972fce12a6f /drivers/gpu/drm/i915/selftests/huge_pages.c
parent    e5ee4956f2fdda8ac2d3eab309df389d43b80fc0 (diff)
drm/i915/selftest: test aligned offsets for 64K
When using softpin it's not enough to just pad the vma size; we also need to ensure the vma offset is at the start of the pt boundary if we plan to utilize 64K pages. Therefore, to improve test coverage, we should use both aligned and unaligned gtt offsets in igt_write_huge.

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20181029203734.21936-1-matthew.auld@intel.com
Diffstat (limited to 'drivers/gpu/drm/i915/selftests/huge_pages.c')
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_pages.c | 22
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 256001b00e32..26c065c8d2c0 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -1135,7 +1135,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	n = 0;
 	for_each_engine(engine, i915, id) {
 		if (!intel_engine_can_store_dword(engine)) {
-			pr_info("store-dword-imm not supported on engine=%u\n", id);
+			pr_info("store-dword-imm not supported on engine=%u\n",
+				id);
 			continue;
 		}
 		engines[n++] = engine;
@@ -1167,17 +1168,30 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 		engine = engines[order[i] % n];
 		i = (i + 1) % (n * I915_NUM_ENGINES);
 
-		err = __igt_write_huge(ctx, engine, obj, size, offset_low, dword, num + 1);
+		/*
+		 * In order to utilize 64K pages we need to both pad the vma
+		 * size and ensure the vma offset is at the start of the pt
+		 * boundary, however to improve coverage we opt for testing both
+		 * aligned and unaligned offsets.
+		 */
+		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+			offset_low = round_down(offset_low,
+						I915_GTT_PAGE_SIZE_2M);
+
+		err = __igt_write_huge(ctx, engine, obj, size, offset_low,
+				       dword, num + 1);
 		if (err)
 			break;
 
-		err = __igt_write_huge(ctx, engine, obj, size, offset_high, dword, num + 1);
+		err = __igt_write_huge(ctx, engine, obj, size, offset_high,
+				       dword, num + 1);
 		if (err)
 			break;
 
 		if (igt_timeout(end_time,
 				"%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
-				__func__, engine->id, offset_low, offset_high, max_page_size))
+				__func__, engine->id, offset_low, offset_high,
+				max_page_size))
 			break;
 	}
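
For reference, a minimal user-space sketch of the alignment rule the patch adds to igt_write_huge: if the object can be backed by 64K GTT pages, the low offset is rounded down to a 2M page-table boundary before it is used, while the high offset is left untouched to keep coverage of unaligned placement. The constants and the round_down() macro below are stand-ins redefined so the example compiles on its own, not the kernel definitions, and the pick_offset_low() helper and sample offset are purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel macros referenced by the patch. */
#define I915_GTT_PAGE_SIZE_64K (1ULL << 16)
#define I915_GTT_PAGE_SIZE_2M  (1ULL << 21)
#define round_down(x, y)       ((x) & ~((uint64_t)(y) - 1)) /* y must be a power of two */

/*
 * Mirror of the offset selection in the hunk above: when the object's
 * sg page sizes include 64K, align the low offset down to the 2M range
 * covered by one page table so 64K pages can actually be used;
 * otherwise leave the offset alone.
 */
static uint64_t pick_offset_low(uint64_t offset_low, uint64_t sg_page_sizes)
{
	if (sg_page_sizes & I915_GTT_PAGE_SIZE_64K)
		offset_low = round_down(offset_low, I915_GTT_PAGE_SIZE_2M);

	return offset_low;
}

int main(void)
{
	uint64_t unaligned = 0x345000; /* hypothetical offset inside a 2M range */

	printf("low offset 0x%llx -> 0x%llx\n",
	       (unsigned long long)unaligned,
	       (unsigned long long)pick_offset_low(unaligned, I915_GTT_PAGE_SIZE_64K));
	return 0;
}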