author    fred gao <fred.gao@intel.com>  2017-11-14 17:09:35 +0800
committer Zhenyu Wang <zhenyuw@linux.intel.com>  2017-11-28 17:24:20 +0800
commit    c3c80f0736bb2767fab983d1ae53252b2ddd405d
tree      9946385bece8895b1d9d80ec2c69183ebf91c769
parent    bf3a26b3cba59e2eee91cec3de6fdb4e8e670295
drm/i915/gvt: Move request alloc to dispatch_workload path only
Previously, performance was improved by auditing and shadowing the workload ahead of vGPU scheduling. However, when more requests are allocated in submit_context before the previous request is added, the timeline already holds the later seqno.

This patch moves the request allocation into the dispatch_workload path, the same place where the request is added. It fixes the kernel BUG triggered by the (timeline->seqno != request->fence.seqno) check in add_request.

Fixes: 89ea20b930cb ("drm/i915/gvt: Factor out scan and shadow from workload dispatch")
Signed-off-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
Signed-off-by: fred gao <fred.gao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
(cherry picked from commit f2880e04f3a5419366926182fc97a3c2e4fd8f2a)
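As context for the check mentioned above, the sketch below is a minimal userspace model of the invariant: allocation reserves the timeline's next seqno, and adding a request only succeeds if nothing else was allocated in between. The names here (timeline, request, request_alloc, add_request) are illustrative stand-ins rather than the real i915 API; only the alloc/add ordering mirrors the behaviour described in the commit message.

/*
 * Minimal userspace model of the seqno check this patch restores.
 * All types and helpers are hypothetical stand-ins, not i915 code.
 */
#include <assert.h>
#include <stdio.h>

struct timeline { unsigned int seqno; };
struct request  { unsigned int fence_seqno; };

/* Allocation reserves the timeline's next seqno. */
static struct request request_alloc(struct timeline *tl)
{
        struct request rq = { .fence_seqno = ++tl->seqno };
        return rq;
}

/*
 * Models the (timeline->seqno != request->fence.seqno) BUG check:
 * it only holds if no other request was allocated in between.
 */
static void add_request(struct timeline *tl, const struct request *rq)
{
        assert(tl->seqno == rq->fence_seqno);
        printf("added request, seqno %u\n", rq->fence_seqno);
}

int main(void)
{
        struct timeline tl = { 0 };

        /*
         * Old flow: both workloads allocated their request in
         * submit_context before either was dispatched, so adding the
         * first one would trip the check (tl.seqno == 2, but
         * rq1.fence_seqno == 1):
         *
         *   rq1 = request_alloc(&tl);
         *   rq2 = request_alloc(&tl);
         *   add_request(&tl, &rq1);   // assert fails
         *
         * New flow: alloc and add stay paired in the dispatch path.
         */
        struct request rq1 = request_alloc(&tl);
        add_request(&tl, &rq1);
        struct request rq2 = request_alloc(&tl);
        add_request(&tl, &rq2);

        return 0;
}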
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c  |  6
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h |  3
3 files changed, 29 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 4427be18e4a9..940cdaaa3f24 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -496,6 +496,12 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
 		goto err_unpin_mm;
 	}
 
+	ret = intel_gvt_generate_request(workload);
+	if (ret) {
+		gvt_vgpu_err("fail to generate request\n");
+		goto err_unpin_mm;
+	}
+
 	ret = prepare_shadow_batch_buffer(workload);
 	if (ret) {
 		gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index f031234290f6..3ac1dc97a7a0 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -254,7 +254,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	struct drm_i915_gem_request *rq;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_ring *ring;
 	int ret;
@@ -300,6 +299,26 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	ret = populate_shadow_context(workload);
 	if (ret)
 		goto err_unpin;
+	workload->shadowed = true;
+	return 0;
+
+err_unpin:
+	engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+	release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+	return ret;
+}
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+	int ring_id = workload->ring_id;
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+	struct drm_i915_gem_request *rq;
+	struct intel_vgpu *vgpu = workload->vgpu;
+	struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+	int ret;
 
 	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
 	if (IS_ERR(rq)) {
@@ -314,14 +333,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	ret = copy_workload_to_ring_buffer(workload);
 	if (ret)
 		goto err_unpin;
-	workload->shadowed = true;
 	return 0;
 
 err_unpin:
 	engine->context_unpin(engine, shadow_ctx);
-err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 2d694f6c0907..b9f872204d7e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -142,4 +142,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
 
 void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload);
+
 #endif