author    Chris Wilson <chris@chris-wilson.co.uk>  2020-03-04 11:23:06 +0800
committer Zhenyu Wang <zhenyuw@linux.intel.com>    2020-03-06 09:56:15 +0800
commit    8fde41076f6df53db84cb13051efed6482986ce3 (patch)
tree      a4e3d6d97a742a8dc3424ec09fb317a68d477453
parent    aa444fc7fb887c205c31a38d398f95467a493e66 (diff)
drm/i915/gvt: Wean gvt off dev_priv->engine[]
Stop trying to escape out of the gvt layer to find the engine that we
initially setup for use with gvt. Record the engines during initialisation
and use them henceforth.

add/remove: 1/4 grow/shrink: 22/28 up/down: 341/-1410 (-1069)

[Zhenyu: rebase, fix nonpriv register check fault, fix gvt engine thread
run failure.]

Cc: Ding Zhuocheng <zhuocheng.ding@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20200304032307.2983-2-zhenyuw@linux.intel.com
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c    | 204
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c      |  99
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.h      |   5
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c           |   3
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c      |  91
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h          |   4
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c  | 111
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.h  |   5
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c  |  13
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c     | 235
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h     |   9
11 files changed, 367 insertions, 412 deletions
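
Before the per-file hunks, a condensed sketch of the pattern this patch applies
throughout gvt. It is illustrative only and not part of the patch: the _old/_new
struct names are invented for contrast, while the fields (ring_id, engine->id,
engine->mmio_base, dev_priv->engine[]) mirror those used in the hunks below. The
gvt state used to carry an integer ring_id and reach back through
dev_priv->engine[] whenever engine details were needed; it now carries the
intel_engine_cs pointer recorded at initialisation and uses it directly.

/* Sketch only, not part of the patch. */

/* Before: every helper re-derives the engine from dev_priv. */
struct parser_exec_state_old {
	struct intel_vgpu *vgpu;
	int ring_id;			/* index into dev_priv->engine[] */
};

static u32 ring_base_old(struct parser_exec_state_old *s,
			 struct drm_i915_private *dev_priv)
{
	return dev_priv->engine[s->ring_id]->mmio_base;
}

/* After: the engine chosen at init time is carried along directly. */
struct parser_exec_state_new {
	struct intel_vgpu *vgpu;
	const struct intel_engine_cs *engine;	/* recorded during init */
};

static u32 ring_base_new(struct parser_exec_state_new *s)
{
	return s->engine->mmio_base;	/* no detour through dev_priv */
}
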
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 73a2891114a4..9e065ad0658f 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -462,7 +462,7 @@ enum {
struct parser_exec_state {
struct intel_vgpu *vgpu;
- int ring_id;
+ const struct intel_engine_cs *engine;
int buf_type;
@@ -635,39 +635,42 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
},
};
-static inline u32 get_opcode(u32 cmd, int ring_id)
+static inline u32 get_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
const struct decode_info *d_info;
- d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
if (d_info == NULL)
return INVALID_OP;
return cmd >> (32 - d_info->op_len);
}
-static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
- unsigned int opcode, int ring_id)
+static inline const struct cmd_info *
+find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode,
+ const struct intel_engine_cs *engine)
{
struct cmd_entry *e;
hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
- if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
+ if (opcode == e->info->opcode &&
+ e->info->rings & engine->mask)
return e->info;
}
return NULL;
}
-static inline const struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
- u32 cmd, int ring_id)
+static inline const struct cmd_info *
+get_cmd_info(struct intel_gvt *gvt, u32 cmd,
+ const struct intel_engine_cs *engine)
{
u32 opcode;
- opcode = get_opcode(cmd, ring_id);
+ opcode = get_opcode(cmd, engine);
if (opcode == INVALID_OP)
return NULL;
- return find_cmd_entry(gvt, opcode, ring_id);
+ return find_cmd_entry(gvt, opcode, engine);
}
static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
@@ -675,12 +678,12 @@ static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
}
-static inline void print_opcode(u32 cmd, int ring_id)
+static inline void print_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
const struct decode_info *d_info;
int i;
- d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
if (d_info == NULL)
return;
@@ -709,10 +712,11 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
int cnt = 0;
int i;
- gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
- " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
- s->ring_id, s->ring_start, s->ring_start + s->ring_size,
- s->ring_head, s->ring_tail);
+ gvt_dbg_cmd(" vgpu%d RING%s: ring_start(%08lx) ring_end(%08lx)"
+ " ring_head(%08lx) ring_tail(%08lx)\n",
+ s->vgpu->id, s->engine->name,
+ s->ring_start, s->ring_start + s->ring_size,
+ s->ring_head, s->ring_tail);
gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
@@ -729,7 +733,7 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
cmd_val(s, 2), cmd_val(s, 3));
- print_opcode(cmd_val(s, 0), s->ring_id);
+ print_opcode(cmd_val(s, 0), s->engine);
s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
@@ -840,7 +844,6 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
unsigned int data;
u32 ring_base;
u32 nopid;
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
if (!strcmp(cmd, "lri"))
data = cmd_val(s, index + 1);
@@ -850,7 +853,7 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
return -EINVAL;
}
- ring_base = dev_priv->engine[s->ring_id]->mmio_base;
+ ring_base = s->engine->mmio_base;
nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
@@ -926,9 +929,9 @@ static int cmd_reg_handler(struct parser_exec_state *s,
* update reg values in it into vregs, so LRIs in workload with
* inhibit context will restore with correct values
*/
- if (IS_GEN(gvt->dev_priv, 9) &&
- intel_gvt_mmio_is_in_ctx(gvt, offset) &&
- !strncmp(cmd, "lri", 3)) {
+ if (IS_GEN(s->engine->i915, 9) &&
+ intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+ !strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
/* check inhibit context */
@@ -964,7 +967,6 @@ static int cmd_handler_lri(struct parser_exec_state *s)
{
int i, ret = 0;
int cmd_len = cmd_length(s);
- struct intel_gvt *gvt = s->vgpu->gvt;
u32 valid_len = CMD_LEN(1);
/*
@@ -979,8 +981,8 @@ static int cmd_handler_lri(struct parser_exec_state *s)
}
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
- if (s->ring_id == BCS0 &&
+ if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) {
+ if (s->engine->id == BCS0 &&
cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
@@ -1001,9 +1003,9 @@ static int cmd_handler_lrr(struct parser_exec_state *s)
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
ret |= ((cmd_reg_inhibit(s, i) ||
- (cmd_reg_inhibit(s, i + 1)))) ?
+ (cmd_reg_inhibit(s, i + 1)))) ?
-EBADRQC : 0;
if (ret)
break;
@@ -1029,7 +1031,7 @@ static int cmd_handler_lrm(struct parser_exec_state *s)
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len;) {
- if (IS_BROADWELL(gvt->dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
if (ret)
break;
@@ -1141,7 +1143,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
if (ret)
return ret;
if (index_mode) {
- hws_pga = s->vgpu->hws_pga[s->ring_id];
+ hws_pga = s->vgpu->hws_pga[s->engine->id];
gma = hws_pga + gma;
patch_value(s, cmd_ptr(s, 2), gma);
val = cmd_val(s, 1) & (~(1 << 21));
@@ -1155,15 +1157,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
return ret;
if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
- set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify,
+ s->workload->pending_events);
return 0;
}
static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
- set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt,
+ s->workload->pending_events);
patch_value(s, cmd_ptr(s, 0), MI_NOOP);
return 0;
}
@@ -1213,7 +1215,7 @@ struct plane_code_mapping {
static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct plane_code_mapping gen8_plane_code[] = {
[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
@@ -1259,7 +1261,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
@@ -1318,13 +1320,12 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
static int gen8_check_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
u32 stride, tile;
if (!info->async_flip)
return 0;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(s->engine->i915) >= 9) {
stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1347,7 +1348,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct intel_vgpu *vgpu = s->vgpu;
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
@@ -1378,11 +1379,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
static int decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
- if (IS_BROADWELL(dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
return gen8_decode_mi_display_flip(s, info);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (INTEL_GEN(s->engine->i915) >= 9)
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1667,7 +1666,7 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
if (ret)
return ret;
if (index_mode) {
- hws_pga = s->vgpu->hws_pga[s->ring_id];
+ hws_pga = s->vgpu->hws_pga[s->engine->id];
gma = hws_pga + gma;
patch_value(s, cmd_ptr(s, 1), gma);
val = cmd_val(s, 0) & (~(1 << 21));
@@ -1676,8 +1675,8 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
}
/* Check notify bit */
if ((cmd_val(s, 0) & (1 << 8)))
- set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw,
+ s->workload->pending_events);
return ret;
}
@@ -1725,12 +1724,18 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
/* Decide privilege based on address space */
- if (cmd_val(s, 0) & (1 << 8) &&
- !(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
+ if (cmd_val(s, 0) & BIT(8) &&
+ !(s->vgpu->scan_nonprivbb & s->engine->mask))
return 0;
+
return 1;
}
+static const char *repr_addr_type(unsigned int type)
+{
+ return type == PPGTT_BUFFER ? "ppgtt" : "ggtt";
+}
+
static int find_bb_size(struct parser_exec_state *s,
unsigned long *bb_size,
unsigned long *bb_end_cmd_offset)
@@ -1753,24 +1758,24 @@ static int find_bb_size(struct parser_exec_state *s,
return -EFAULT;
cmd = cmd_val(s, 0);
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
do {
if (copy_gma_to_hva(s->vgpu, mm,
- gma, gma + 4, &cmd) < 0)
+ gma, gma + 4, &cmd) < 0)
return -EFAULT;
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
@@ -1799,12 +1804,12 @@ static int audit_bb_end(struct parser_exec_state *s, void *va)
u32 cmd = *(u32 *)va;
const struct cmd_info *info;
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
@@ -1857,7 +1862,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
if (bb->ppgtt)
start_offset = gma & ~I915_GTT_PAGE_MASK;
- bb->obj = i915_gem_object_create_shmem(s->vgpu->gvt->dev_priv,
+ bb->obj = i915_gem_object_create_shmem(s->engine->i915,
round_up(bb_size + start_offset,
PAGE_SIZE));
if (IS_ERR(bb->obj)) {
@@ -2666,25 +2671,25 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (cmd == MI_NOOP)
info = &cmd_info[mi_noop_index];
else
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
s->info = info;
- trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
+ trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va,
cmd_length(s), s->buf_type, s->buf_addr_type,
s->workload, info->name);
if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
ret = gvt_check_valid_cmd_length(cmd_length(s),
- info->valid_len);
+ info->valid_len);
if (ret)
return ret;
}
@@ -2781,7 +2786,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
s.vgpu = workload->vgpu;
- s.ring_id = workload->ring_id;
+ s.engine = workload->engine;
s.ring_start = workload->rb_start;
s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
s.ring_head = gma_head;
@@ -2790,8 +2795,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
s.workload = workload;
s.is_ctx_wa = false;
- if ((bypass_scan_mask & (1 << workload->ring_id)) ||
- gma_head == gma_tail)
+ if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail)
return 0;
ret = ip_gma_set(&s, gma_head);
@@ -2830,7 +2834,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
s.vgpu = workload->vgpu;
- s.ring_id = workload->ring_id;
+ s.engine = workload->engine;
s.ring_start = wa_ctx->indirect_ctx.guest_gma;
s.ring_size = ring_size;
s.ring_head = gma_head;
@@ -2855,7 +2859,6 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
struct intel_vgpu_submission *s = &vgpu->submission;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
void *shadow_ring_buffer_va;
- int ring_id = workload->ring_id;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2868,21 +2871,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
- if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
+ if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) {
void *p;
/* realloc the new ring buffer if needed */
- p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
- GFP_KERNEL);
+ p = krealloc(s->ring_scan_buffer[workload->engine->id],
+ workload->rb_len, GFP_KERNEL);
if (!p) {
gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
return -ENOMEM;
}
- s->ring_scan_buffer[ring_id] = p;
- s->ring_scan_buffer_size[ring_id] = workload->rb_len;
+ s->ring_scan_buffer[workload->engine->id] = p;
+ s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len;
}
- shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
+ shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id];
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
@@ -2940,7 +2943,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int ret = 0;
void *map;
- obj = i915_gem_object_create_shmem(workload->vgpu->gvt->dev_priv,
+ obj = i915_gem_object_create_shmem(workload->engine->i915,
roundup(ctx_size + CACHELINE_BYTES,
PAGE_SIZE));
if (IS_ERR(obj))
@@ -3029,30 +3032,14 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
-static const struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
- unsigned int opcode, unsigned long rings)
-{
- const struct cmd_info *info = NULL;
- unsigned int ring;
-
- for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
- info = find_cmd_entry(gvt, opcode, ring);
- if (info)
- break;
- }
- return info;
-}
-
static int init_cmd_table(struct intel_gvt *gvt)
{
+ unsigned int gen_type = intel_gvt_get_device_type(gvt);
int i;
- struct cmd_entry *e;
- const struct cmd_info *info;
- unsigned int gen_type;
-
- gen_type = intel_gvt_get_device_type(gvt);
for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
+ struct cmd_entry *e;
+
if (!(cmd_info[i].devices & gen_type))
continue;
@@ -3061,23 +3048,16 @@ static int init_cmd_table(struct intel_gvt *gvt)
return -ENOMEM;
e->info = &cmd_info[i];
- info = find_cmd_entry_any_ring(gvt,
- e->info->opcode, e->info->rings);
- if (info) {
- gvt_err("%s %s duplicated\n", e->info->name,
- info->name);
- kfree(e);
- return -EEXIST;
- }
if (cmd_info[i].opcode == OP_MI_NOOP)
mi_noop_index = i;
INIT_HLIST_NODE(&e->hlist);
add_cmd_entry(gvt, e);
gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
- e->info->name, e->info->opcode, e->info->flag,
- e->info->devices, e->info->rings);
+ e->info->name, e->info->opcode, e->info->flag,
+ e->info->devices, e->info->rings);
}
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index d6e7a1189bad..b8aa07c8bcc0 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -39,8 +39,7 @@
#define _EL_OFFSET_STATUS_BUF 0x370
#define _EL_OFFSET_STATUS_PTR 0x3A0
-#define execlist_ring_mmio(gvt, ring_id, offset) \
- (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
+#define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset))
#define valid_context(ctx) ((ctx)->valid)
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
@@ -54,12 +53,12 @@ static int context_switch_events[] = {
[VECS0] = VECS_AS_CONTEXT_SWITCH,
};
-static int ring_id_to_context_switch_event(unsigned int ring_id)
+static int to_context_switch_event(const struct intel_engine_cs *engine)
{
- if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
+ if (WARN_ON(engine->id >= ARRAY_SIZE(context_switch_events)))
return -EINVAL;
- return context_switch_events[ring_id];
+ return context_switch_events[engine->id];
}
static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
@@ -93,9 +92,8 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
struct execlist_ctx_descriptor_format *desc = execlist->running_context;
struct intel_vgpu *vgpu = execlist->vgpu;
struct execlist_status_format status;
- int ring_id = execlist->ring_id;
- u32 status_reg = execlist_ring_mmio(vgpu->gvt,
- ring_id, _EL_OFFSET_STATUS);
+ u32 status_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
status.ldw = vgpu_vreg(vgpu, status_reg);
status.udw = vgpu_vreg(vgpu, status_reg + 4);
@@ -124,21 +122,19 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
}
static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
- struct execlist_context_status_format *status,
- bool trigger_interrupt_later)
+ struct execlist_context_status_format *status,
+ bool trigger_interrupt_later)
{
struct intel_vgpu *vgpu = execlist->vgpu;
- int ring_id = execlist->ring_id;
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 write_pointer;
u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
unsigned long hwsp_gpa;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_PTR);
- ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_BUF);
+ ctx_status_ptr_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_PTR);
+ ctx_status_buf_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_BUF);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
@@ -161,26 +157,24 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
/* Update the CSB and CSB write pointer in HWSP */
hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
- vgpu->hws_pga[ring_id]);
+ vgpu->hws_pga[execlist->engine->id]);
if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 +
- write_pointer * 8,
- status, 8);
+ hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
+ status, 8);
intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa +
- intel_hws_csb_write_index(dev_priv) * 4,
- &write_pointer, 4);
+ hwsp_gpa + intel_hws_csb_write_index(execlist->engine->i915) * 4,
+ &write_pointer, 4);
}
gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
- vgpu->id, write_pointer, offset, status->ldw, status->udw);
+ vgpu->id, write_pointer, offset, status->ldw, status->udw);
if (trigger_interrupt_later)
return;
intel_vgpu_trigger_virtual_event(vgpu,
- ring_id_to_context_switch_event(execlist->ring_id));
+ to_context_switch_event(execlist->engine));
}
static int emulate_execlist_ctx_schedule_out(
@@ -261,9 +255,8 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
struct intel_vgpu_execlist *execlist)
{
struct intel_vgpu *vgpu = execlist->vgpu;
- int ring_id = execlist->ring_id;
- u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS);
+ u32 status_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
struct execlist_status_format status;
status.ldw = vgpu_vreg(vgpu, status_reg);
@@ -379,7 +372,6 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct execlist_ctx_descriptor_format ctx[2];
- int ring_id = workload->ring_id;
int ret;
if (!workload->emulate_schedule_in)
@@ -388,7 +380,8 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
- ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
+ ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id],
+ ctx);
if (ret) {
gvt_vgpu_err("fail to emulate execlist schedule in\n");
return ret;
@@ -399,21 +392,21 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- int ring_id = workload->ring_id;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct intel_vgpu_execlist *execlist =
+ &s->execlist[workload->engine->id];
struct intel_vgpu_workload *next_workload;
- struct list_head *next = workload_q_head(vgpu, ring_id)->next;
+ struct list_head *next = workload_q_head(vgpu, workload->engine)->next;
bool lite_restore = false;
int ret = 0;
- gvt_dbg_el("complete workload %p status %d\n", workload,
- workload->status);
+ gvt_dbg_el("complete workload %p status %d\n",
+ workload, workload->status);
- if (workload->status || (vgpu->resetting_eng & BIT(ring_id)))
+ if (workload->status || vgpu->resetting_eng & workload->engine->mask)
goto out;
- if (!list_empty(workload_q_head(vgpu, ring_id))) {
+ if (!list_empty(workload_q_head(vgpu, workload->engine))) {
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
next_workload = container_of(next,
@@ -436,14 +429,15 @@ out:
return ret;
}
-static int submit_context(struct intel_vgpu *vgpu, int ring_id,
- struct execlist_ctx_descriptor_format *desc,
- bool emulate_schedule_in)
+static int submit_context(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine,
+ struct execlist_ctx_descriptor_format *desc,
+ bool emulate_schedule_in)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_workload *workload = NULL;
- workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
+ workload = intel_vgpu_create_workload(vgpu, engine, desc);
if (IS_ERR(workload))
return PTR_ERR(workload);
@@ -452,19 +446,20 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
workload->emulate_schedule_in = emulate_schedule_in;
if (emulate_schedule_in)
- workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
+ workload->elsp_dwords = s->execlist[engine->id].elsp_dwords;
gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
- emulate_schedule_in);
+ emulate_schedule_in);
intel_vgpu_queue_workload(workload);
return 0;
}
-int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
struct execlist_ctx_descriptor_format *desc[2];
int i, ret;
@@ -489,7 +484,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
for (i = 0; i < ARRAY_SIZE(desc); i++) {
if (!desc[i]->valid)
continue;
- ret = submit_context(vgpu, ring_id, desc[i], i == 0);
+ ret = submit_context(vgpu, engine, desc[i], i == 0);
if (ret) {
gvt_vgpu_err("failed to submit desc %d\n", i);
return ret;
@@ -504,22 +499,22 @@ inv_desc:
return -EINVAL;
}
-static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
+static void init_vgpu_execlist(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 ctx_status_ptr_reg;
memset(execlist, 0, sizeof(*execlist));
execlist->vgpu = vgpu;
- execlist->ring_id = ring_id;
+ execlist->engine = engine;
execlist->slot[0].index = 0;
execlist->slot[1].index = 1;
- ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_PTR);
+ ctx_status_ptr_reg = execlist_ring_mmio(engine, _EL_OFFSET_STATUS_PTR);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
ctx_status_ptr.read_ptr = 0;
ctx_status_ptr.write_ptr = 0x7;
@@ -549,7 +544,7 @@ static void reset_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t tmp;
for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
- init_vgpu_execlist(vgpu, engine->id);
+ init_vgpu_execlist(vgpu, engine);
}
static int init_execlist(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 5c0c1fd30c83..d62cd14605a3 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -170,16 +170,17 @@ struct intel_vgpu_execlist {
struct intel_vgpu_execlist_slot *running_slot;
struct intel_vgpu_execlist_slot *pending_slot;
struct execlist_ctx_descriptor_format *running_context;
- int ring_id;
struct intel_vgpu *vgpu;
struct intel_vgpu_elsp_dwords elsp_dwords;
+ const struct intel_engine_cs *engine;
};
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
-int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 87bed32f1191..0ba3a7c8522f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -318,6 +318,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
mutex_init(&gvt->lock);
mutex_init(&gvt->sched_lock);
gvt->dev_priv = dev_priv;
+ dev_priv->gvt = gvt;
init_device_info(gvt);
@@ -376,7 +377,6 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
intel_gvt_debugfs_init(gvt);
gvt_dbg_core("gvt device initialization is done\n");
- dev_priv->gvt = gvt;
intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
intel_gvt_host.initialized = true;
return 0;
@@ -402,6 +402,7 @@ out_clean_mmio_info:
out_clean_idr:
idr_destroy(&gvt->vgpu_idr);
kfree(gvt);
+ dev_priv->gvt = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 1793f6991fa8..952729b7a87b 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -142,25 +142,25 @@ static int new_mmio_info(struct intel_gvt *gvt,
}
/**
- * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id
+ * intel_gvt_render_mmio_to_engine - convert a mmio offset into the engine
* @gvt: a GVT device
* @offset: register offset
*
* Returns:
- * Ring ID on success, negative error code if failed.
+ * The engine containing the offset within its mmio page.
*/
-int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
- unsigned int offset)
+const struct intel_engine_cs *
+intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
{
enum intel_engine_id id;
struct intel_engine_cs *engine;
offset &= ~GENMASK(11, 0);
- for_each_engine(engine, gvt->dev_priv, id) {
+ for_each_engine(engine, gvt->dev_priv, id)
if (engine->mmio_base == offset)
- return id;
- }
- return -ENODEV;
+ return engine;
+
+ return NULL;
}
#define offset_to_fence_num(offset) \
@@ -492,7 +492,7 @@ static i915_reg_t force_nonpriv_white_list[] = {
};
/* a simple bsearch */
-static inline bool in_whitelist(unsigned int reg)
+static inline bool in_whitelist(u32 reg)
{
int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
i915_reg_t *array = force_nonpriv_white_list;
@@ -514,26 +514,21 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
- u32 ring_base;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- int ret = -EINVAL;
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
- if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) {
- gvt_err("vgpu(%d) ring %d Invalid FORCE_NONPRIV offset %x(%dB)\n",
- vgpu->id, ring_id, offset, bytes);
- return ret;
+ if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) {
+ gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
+ vgpu->id, offset, bytes);
+ return -EINVAL;
}
- rin