From: Chris Wilson <chris@chris-wilson.co.uk>
Date: Mon, 12 Feb 2018 10:24:15 +0000
Subject: drm/i915: Hold rpm wakeref for printing the engine's register state
Git-commit: 3ceda3a4a856336c5c902a6cfbacff2c4effaa21
Patch-mainline: v4.17-rc1
References: FATE#326289 FATE#326079 FATE#326049 FATE#322398 FATE#326166

When dumping the engine, we print out the current register values. This
requires the rpm wakeref. If the device is asleep, we can assume the
engine is asleep (and the register state is uninteresting), so skip the
register dump and only acquire the rpm wakeref if the device is already
awake.
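
The register dump in intel_engine_dump() is therefore wrapped in a
conditional wakeref guard; a minimal sketch of that guard, using only
the helpers touched by the hunks below:

	if (intel_runtime_pm_get_if_in_use(engine->i915)) {
		/* Device already awake: MMIO reads are safe */
		intel_engine_print_registers(engine, m);
		intel_runtime_pm_put(engine->i915);
	} else {
		/* Asleep: registers are uninteresting, don't wake the device */
		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
	}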

Reported-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180212102415.24246-1-chris@chris-wilson.co.uk

Acked-by: Petr Tesarik <ptesarik@suse.com>
---
 drivers/gpu/drm/i915/intel_engine_cs.c  |  162 +++++++++++++++++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |    6 -
 2 files changed, 94 insertions(+), 74 deletions(-)

--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -691,7 +691,7 @@ void intel_engine_cleanup_common(struct
 	engine->context_unpin(engine, engine->i915->kernel_context);
 }
 
-u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
+u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	u64 acthd;
@@ -707,7 +707,7 @@ u64 intel_engine_get_active_head(struct
 	return acthd;
 }
 
-u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine)
+u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	u64 bbaddr;
@@ -1705,73 +1705,20 @@ static void hexdump(struct drm_printer *
 	}
 }
 
-void intel_engine_dump(struct intel_engine_cs *engine,
-		       struct drm_printer *m,
-		       const char *header, ...)
+static void intel_engine_print_registers(const struct intel_engine_cs *engine,
+					 struct drm_printer *m)
 {
-	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
-	const struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct i915_gpu_error * const error = &engine->i915->gpu_error;
 	struct drm_i915_private *dev_priv = engine->i915;
-	struct drm_i915_gem_request *rq;
-	struct rb_node *rb;
-	char hdr[80];
+	const struct intel_engine_execlists * const execlists =
+		&engine->execlists;
 	u64 addr;
 
-	if (header) {
-		va_list ap;
-
-		va_start(ap, header);
-		drm_vprintf(m, header, &ap);
-		va_end(ap);
-	}
-
-	if (i915_terminally_wedged(&engine->i915->gpu_error))
-		drm_printf(m, "*** WEDGED ***\n");
-
-	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
-		   intel_engine_get_seqno(engine),
-		   intel_engine_last_submit(engine),
-		   engine->hangcheck.seqno,
-		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
-		   engine->timeline->inflight_seqnos);
-	drm_printf(m, "\tReset count: %d (global %d)\n",
-		   i915_reset_engine_count(error, engine),
-		   i915_reset_count(error));
-
-	rcu_read_lock();
-
-	drm_printf(m, "\tRequests:\n");
-
-	rq = list_first_entry(&engine->timeline->requests,
-			      struct drm_i915_gem_request, link);
-	if (&rq->link != &engine->timeline->requests)
-		print_request(m, rq, "\t\tfirst  ");
-
-	rq = list_last_entry(&engine->timeline->requests,
-			     struct drm_i915_gem_request, link);
-	if (&rq->link != &engine->timeline->requests)
-		print_request(m, rq, "\t\tlast   ");
-
-	rq = i915_gem_find_active_request(engine);
-	if (rq) {
-		print_request(m, rq, "\t\tactive ");
-		drm_printf(m,
-			   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
-			   rq->head, rq->postfix, rq->tail,
-			   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
-			   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
-	}
-
-	drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n",
-		   I915_READ(RING_START(engine->mmio_base)),
-		   rq ? i915_ggtt_offset(rq->ring->vma) : 0);
-	drm_printf(m, "\tRING_HEAD:  0x%08x [0x%08x]\n",
-		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR,
-		   rq ? rq->ring->head : 0);
-	drm_printf(m, "\tRING_TAIL:  0x%08x [0x%08x]\n",
-		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
-		   rq ? rq->ring->tail : 0);
+	drm_printf(m, "\tRING_START: 0x%08x\n",
+		   I915_READ(RING_START(engine->mmio_base)));
+	drm_printf(m, "\tRING_HEAD:  0x%08x\n",
+		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
+	drm_printf(m, "\tRING_TAIL:  0x%08x\n",
+		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
 	drm_printf(m, "\tRING_CTL:   0x%08x%s\n",
 		   I915_READ(RING_CTL(engine->mmio_base)),
 		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
@@ -1780,6 +1727,11 @@ void intel_engine_dump(struct intel_engi
 			   I915_READ(RING_MI_MODE(engine->mmio_base)),
 			   I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
 	}
+
+	if (INTEL_GEN(dev_priv) >= 6) {
+		drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
+	}
+
 	if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
 		drm_printf(m, "\tSYNC_0: 0x%08x\n",
 			   I915_READ(RING_SYNC_0(engine->mmio_base)));
@@ -1790,8 +1742,6 @@ void intel_engine_dump(struct intel_engi
 				   I915_READ(RING_SYNC_2(engine->mmio_base)));
 	}
 
-	rcu_read_unlock();
-
 	addr = intel_engine_get_active_head(engine);
 	drm_printf(m, "\tACTHD:  0x%08x_%08x\n",
 		   upper_32_bits(addr), lower_32_bits(addr));
@@ -1853,10 +1803,13 @@ void intel_engine_dump(struct intel_engi
 
 		rcu_read_lock();
 		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
+			struct drm_i915_gem_request *rq;
 			unsigned int count;
 
 			rq = port_unpack(&execlists->port[idx], &count);
 			if (rq) {
+				char hdr[80];
+
 				snprintf(hdr, sizeof(hdr),
 					 "\t\tELSP[%d] count=%d, rq: ",
 					 idx, count);
@@ -1875,6 +1828,77 @@ void intel_engine_dump(struct intel_engi
 		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
 			   I915_READ(RING_PP_DIR_DCLV(engine)));
 	}
+}
+
+void intel_engine_dump(struct intel_engine_cs *engine,
+		       struct drm_printer *m,
+		       const char *header, ...)
+{
+	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
+	const struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct i915_gpu_error * const error = &engine->i915->gpu_error;
+	struct drm_i915_gem_request *rq;
+	struct rb_node *rb;
+
+	if (header) {
+		va_list ap;
+
+		va_start(ap, header);
+		drm_vprintf(m, header, &ap);
+		va_end(ap);
+	}
+
+	if (i915_terminally_wedged(&engine->i915->gpu_error))
+		drm_printf(m, "*** WEDGED ***\n");
+
+	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
+		   intel_engine_get_seqno(engine),
+		   intel_engine_last_submit(engine),
+		   engine->hangcheck.seqno,
+		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
+		   engine->timeline->inflight_seqnos);
+	drm_printf(m, "\tReset count: %d (global %d)\n",
+		   i915_reset_engine_count(error, engine),
+		   i915_reset_count(error));
+
+	rcu_read_lock();
+
+	drm_printf(m, "\tRequests:\n");
+
+	rq = list_first_entry(&engine->timeline->requests,
+			      struct drm_i915_gem_request, link);
+	if (&rq->link != &engine->timeline->requests)
+		print_request(m, rq, "\t\tfirst  ");
+
+	rq = list_last_entry(&engine->timeline->requests,
+			     struct drm_i915_gem_request, link);
+	if (&rq->link != &engine->timeline->requests)
+		print_request(m, rq, "\t\tlast   ");
+
+	rq = i915_gem_find_active_request(engine);
+	if (rq) {
+		print_request(m, rq, "\t\tactive ");
+		drm_printf(m,
+			   "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n",
+			   rq->head, rq->postfix, rq->tail,
+			   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
+			   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);
+		drm_printf(m, "\t\tring->start: 0x%08x\n",
+			   i915_ggtt_offset(rq->ring->vma));
+		drm_printf(m, "\t\tring->head:  0x%08x\n",
+			   rq->ring->head);
+		drm_printf(m, "\t\tring->tail:  0x%08x\n",
+			   rq->ring->tail);
+	}
+
+	rcu_read_unlock();
+
+	if (intel_runtime_pm_get_if_in_use(engine->i915)) {
+		intel_engine_print_registers(engine, m);
+		intel_runtime_pm_put(engine->i915);
+	} else {
+		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
+	}
 
 	spin_lock_irq(&engine->timeline->lock);
 	list_for_each_entry(rq, &engine->timeline->requests, link)
@@ -1897,10 +1921,6 @@ void intel_engine_dump(struct intel_engi
 	}
 	spin_unlock_irq(&b->rb_lock);
 
-	if (INTEL_GEN(dev_priv) >= 6) {
-		drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
-	}
-
 	drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n",
 		   engine->irq_posted,
 		   yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -658,7 +658,7 @@ intel_engine_flag(const struct intel_eng
 }
 
 static inline u32
-intel_read_status_page(struct intel_engine_cs *engine, int reg)
+intel_read_status_page(const struct intel_engine_cs *engine, int reg)
 {
 	/* Ensure that the compiler doesn't optimize away the load. */
 	return READ_ONCE(engine->status_page.page_addr[reg]);
@@ -816,8 +816,8 @@ int intel_init_bsd_ring_buffer(struct in
 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
 
-u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
-u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
+u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 {