From 4671ea204179dc705d4b0c31045e6acdfd6e59e8 Mon Sep 17 00:00:00 2001
From: Changbin Du
Date: Fri, 23 Jun 2017 15:45:32 +0800
Subject: [PATCH] drm/i915/gvt: Optimize ring switch 2x faster again by
 lightweight mmio access wrapper
Git-commit: 4671ea204179dc705d4b0c31045e6acdfd6e59e8
Patch-mainline: v4.14-rc1
References: FATE#322643 bsc#1055900

I915_READ/WRITE is not just an mmio read/write; it also performs debug
checks and a Forcewake domain lookup. That is too heavy for the GVT
ring-switch path, which accesses a batch of mmio registers on every
ring switch. We can handle Forcewake manually, once for the whole
batch, and use the raw accessors instead. The benefit is 2x faster
mmio switch performance.

             Before      After
  cycles    ~550000    ~250000

V2: Use the existing I915_READ_FW/I915_WRITE_FW macros. (zhenyu)

Signed-off-by: Changbin Du
Reviewed-by: Zhenyu Wang
Signed-off-by: Zhenyu Wang
Acked-by: Takashi Iwai
---
 drivers/gpu/drm/i915/gvt/render.c | 39 ++++++++++++++++++++++++++-------------
 1 file changed, 26 insertions(+), 13 deletions(-)

--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -207,7 +207,7 @@ static void load_mocs(struct intel_vgpu
 
 	offset.reg = regs[ring_id];
 	for (i = 0; i < 64; i++) {
-		gen9_render_mocs[ring_id][i] = I915_READ(offset);
+		gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
 		I915_WRITE(offset, vgpu_vreg(vgpu, offset));
 		offset.reg += 4;
 	}
@@ -215,8 +215,8 @@ static void load_mocs(struct intel_vgpu
 	if (ring_id == RCS) {
 		l3_offset.reg = 0xb020;
 		for (i = 0; i < 32; i++) {
-			gen9_render_mocs_L3[i] = I915_READ(l3_offset);
-			I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
+			gen9_render_mocs_L3[i] = I915_READ_FW(l3_offset);
+			I915_WRITE_FW(l3_offset, vgpu_vreg(vgpu, l3_offset));
 			l3_offset.reg += 4;
 		}
 	}
@@ -240,16 +240,16 @@ static void restore_mocs(struct intel_vg
 
 	offset.reg = regs[ring_id];
 	for (i = 0; i < 64; i++) {
-		vgpu_vreg(vgpu, offset) = I915_READ(offset);
-		I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
+		vgpu_vreg(vgpu, offset) = I915_READ_FW(offset);
+		I915_WRITE_FW(offset, gen9_render_mocs[ring_id][i]);
 		offset.reg += 4;
 	}
 
 	if (ring_id == RCS) {
 		l3_offset.reg = 0xb020;
 		for (i = 0; i < 32; i++) {
-			vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
-			I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
+			vgpu_vreg(vgpu, l3_offset) = I915_READ_FW(l3_offset);
+			I915_WRITE_FW(l3_offset, gen9_render_mocs_L3[i]);
 			l3_offset.reg += 4;
 		}
 	}
@@ -284,7 +284,7 @@ static void switch_mmio_to_vgpu(struct i
 		if (mmio->ring_id != ring_id)
 			continue;
 
-		mmio->value = I915_READ(mmio->reg);
+		mmio->value = I915_READ_FW(mmio->reg);
 
 		/*
 		 * if it is an inhibit context, load in_context mmio
@@ -301,7 +301,7 @@ static void switch_mmio_to_vgpu(struct i
 		else
 			v = vgpu_vreg(vgpu, mmio->reg);
 
-		I915_WRITE(mmio->reg, v);
+		I915_WRITE_FW(mmio->reg, v);
 		last_reg = mmio->reg;
 
 		trace_render_mmio(vgpu->id, "load",
@@ -311,7 +311,7 @@ static void switch_mmio_to_vgpu(struct i
 
 	/* Make sure the swiched MMIOs has taken effect. */
 	if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
-		POSTING_READ(last_reg);
+		I915_READ_FW(last_reg);
 
 	handle_tlb_pending_event(vgpu, ring_id);
 }
@@ -338,7 +338,7 @@ static void switch_mmio_to_host(struct i
 		if (mmio->ring_id != ring_id)
 			continue;
 
-		vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);
+		vgpu_vreg(vgpu, mmio->reg) = I915_READ_FW(mmio->reg);
 
 		if (mmio->mask) {
 			vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
@@ -349,7 +349,7 @@ static void switch_mmio_to_host(struct i
 		if (mmio->in_context)
 			continue;
 
-		I915_WRITE(mmio->reg, v);
+		I915_WRITE_FW(mmio->reg, v);
 		last_reg = mmio->reg;
 
 		trace_render_mmio(vgpu->id, "restore",
@@ -359,7 +359,7 @@ static void switch_mmio_to_host(struct i
 
 	/* Make sure the swiched MMIOs has taken effect. */
 	if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
-		POSTING_READ(last_reg);
+		I915_READ_FW(last_reg);
 }
 
 /**
@@ -374,12 +374,23 @@ static void switch_mmio_to_host(struct i
 void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 			   struct intel_vgpu *next, int ring_id)
 {
+	struct drm_i915_private *dev_priv;
+
 	if (WARN_ON(!pre && !next))
 		return;
 
 	gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
 		       pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
 
+	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
+
+	/**
+	 * We are using a raw mmio access wrapper to improve the
+	 * performance of batch mmio read/write, so we need to
+	 * handle forcewake manually.
+	 */
+	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
 	/**
 	 * TODO: Optimize for vGPU to vGPU switch by merging
 	 * switch_mmio_to_host() and switch_mmio_to_vgpu().
@@ -389,4 +400,6 @@ void intel_gvt_switch_mmio(struct intel_
 
 	if (next)
 		switch_mmio_to_vgpu(next, ring_id);
+
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
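
For reviewers who want the access pattern at a glance, below is a
condensed sketch of what the patch does in load_mocs()/restore_mocs()
and the ring-switch paths. It is illustrative only: load_mmio_block()
and its base/values/count parameters are hypothetical, while
intel_uncore_forcewake_get()/intel_uncore_forcewake_put(),
I915_READ_FW()/I915_WRITE_FW(), _MMIO() and FORCEWAKE_ALL are the real
interfaces in this kernel version.

    #include "i915_drv.h"   /* raw accessor macros and forcewake API */

    /* Hypothetical helper: write a contiguous block of registers with
     * a single forcewake transaction instead of one lookup per access.
     */
    static void load_mmio_block(struct drm_i915_private *dev_priv,
                                u32 base, const u32 *values, int count)
    {
            int i;

            if (count <= 0)
                    return;

            /* Take all forcewake domains once for the whole batch. */
            intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

            /* Raw writes: no debug checking, no per-register
             * forcewake domain lookup. */
            for (i = 0; i < count; i++)
                    I915_WRITE_FW(_MMIO(base + i * 4), values[i]);

            /* A raw read of the last register flushes the posted
             * writes, standing in for POSTING_READ(), which would
             * re-enter the heavyweight accessor. */
            (void)I915_READ_FW(_MMIO(base + (count - 1) * 4));

            intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
    }

The design point is that forcewake get/put are reference counted, so
taking FORCEWAKE_ALL around the batch is safe even if some register in
the range belongs to a domain that is already awake; the raw accessors
then skip both the wakeup check and the debug machinery on every
access, which is where the ~2x cycle saving comes from.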