From 2ceddb189306443a8d0d300de5b2bdf496749548 Mon Sep 17 00:00:00 2001
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
Date: Fri, 3 Nov 2017 15:18:12 +0000
Subject: [PATCH] drm/i915/guc: Split GuC firmware xfer function into clear steps
Git-commit: 2ceddb189306443a8d0d300de5b2bdf496749548
Patch-mainline: v4.16-rc1
References: FATE#322643 bsc#1055900

Transfer of the GuC firmware requires a few steps that are
currently implemented in two large functions. Split this code
into smaller functions to make each step small and clear.
Also be prepared for a potential DMA xfer step failure.

V2: rename function steps (Sagar)

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171103151816.62048-1-michal.wajdeczko@intel.com
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Takashi Iwai <tiwai@suse.de>

---
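Note: for reviewers of this backport, a condensed sketch of the resulting
upload flow (function names taken from guc_fw_xfer() in the diff below;
forcewake bracketing and error reporting trimmed for brevity):

	guc_prepare_xfer(guc);		/* shim control, clock gating, doorbell setup */
	ret = guc_xfer_rsa(guc, vma);	/* RSA signature via UOS_RSA_SCRATCH MMIO */
	ret = guc_xfer_ucode(guc, vma);	/* CSS header + uCode to WOPCM via DMA */
	ret = guc_wait_ucode(guc);	/* poll GUC_STATUS until the GuC reports ready */
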
 drivers/gpu/drm/i915/intel_guc_fw.c |  176 +++++++++++++++++++++---------------
 1 file changed, 105 insertions(+), 71 deletions(-)

--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -97,23 +97,55 @@ int intel_guc_fw_select(struct intel_guc
 	return 0;
 }
 
-/*
- * Read the GuC status register (GUC_STATUS) and store it in the
- * specified location; then return a boolean indicating whether
- * the value matches either of two values representing completion
- * of the GuC boot process.
- *
- * This is used for polling the GuC status in a wait_for()
- * loop below.
- */
-static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
-				      u32 *status)
+static void guc_prepare_xfer(struct intel_guc *guc)
 {
-	u32 val = I915_READ(GUC_STATUS);
-	u32 uk_val = val & GS_UKERNEL_MASK;
-	*status = val;
-	return (uk_val == GS_UKERNEL_READY ||
-		((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+	/* Enable MIA caching. GuC clock gating is disabled. */
+	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
+
+	/* WaDisableMinuteIaClockGating:bxt */
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+		I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
+					      ~GUC_ENABLE_MIA_CLOCK_GATING));
+	}
+
+	/* WaC6DisallowByGfxPause:bxt */
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+		I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
+
+	if (IS_GEN9_LP(dev_priv))
+		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+	else
+		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
+
+	if (IS_GEN9(dev_priv)) {
+		/* DOP Clock Gating Enable for GuC clocks */
+		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
+					    I915_READ(GEN7_MISCCPCTL)));
+
+		/* allows for 5us (in 10ns units) before GT can go to RC6 */
+		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
+	}
+}
+
+/* Copy RSA signature from the fw image to HW for verification */
+static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct intel_uc_fw *guc_fw = &guc->fw;
+	struct sg_table *sg = vma->pages;
+	u32 rsa[UOS_RSA_SCRATCH_MAX_COUNT];
+	int i;
+
+	if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa),
+			       guc_fw->rsa_offset) != sizeof(rsa))
+		return -EINVAL;
+
+	for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
+		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
+
+	return 0;
 }
 
 /*
@@ -122,29 +154,17 @@ static inline bool guc_ucode_response(st
  * Architecturally, the DMA engine is bidirectional, and can potentially even
  * transfer between GTT locations. This functionality is left out of the API
  * for now as there is no need for it.
- *
- * Note that GuC needs the CSS header plus uKernel code to be copied by the
- * DMA engine in one operation, whereas the RSA signature is loaded via MMIO.
  */
-static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
-			      struct i915_vma *vma)
+static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
 {
-	struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	struct intel_uc_fw *guc_fw = &guc->fw;
 	unsigned long offset;
-	struct sg_table *sg = vma->pages;
-	u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
-	int i, ret = 0;
 
-	/* where RSA signature starts */
-	offset = guc_fw->rsa_offset;
-
-	/* Copy RSA signature from the fw image to HW for verification */
-	sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa), offset);
-	for (i = 0; i < UOS_RSA_SCRATCH_MAX_COUNT; i++)
-		I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
-
-	/* The header plus uCode will be copied to WOPCM via DMA, excluding any
-	 * other components */
+	/*
+	 * The header plus uCode will be copied to WOPCM via DMA, excluding any
+	 * other components
+	 */
 	I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
 
 	/* Set the source address for the new blob */
@@ -162,33 +182,57 @@ static int guc_ucode_xfer_dma(struct drm
 	/* Finally start the DMA */
 	I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
 
+	return 0;
+}
+
+/*
+ * Read the GuC status register (GUC_STATUS) and store it in the
+ * specified location; then return a boolean indicating whether
+ * the value matches either of two values representing completion
+ * of the GuC boot process.
+ *
+ * This is used for polling the GuC status in a wait_for()
+ * loop below.
+ */
+static inline bool guc_ready(struct intel_guc *guc, u32 *status)
+{
+	struct drm_i915_private *dev_priv = guc_to_i915(guc);
+	u32 val = I915_READ(GUC_STATUS);
+	u32 uk_val = val & GS_UKERNEL_MASK;
+
+	*status = val;
+	return (uk_val == GS_UKERNEL_READY) ||
+		((val & GS_MIA_CORE_STATE) && (uk_val == GS_UKERNEL_LAPIC_DONE));
+}
+
+static int guc_wait_ucode(struct intel_guc *guc)
+{
+	u32 status;
+	int ret;
+
 	/*
-	 * Wait for the DMA to complete & the GuC to start up.
+	 * Wait for the GuC to start up.
 	 * NB: Docs recommend not using the interrupt for completion.
 	 * Measurements indicate this should take no more than 20ms, so a
 	 * timeout here indicates that the GuC has failed and is unusable.
 	 * (Higher levels of the driver will attempt to fall back to
 	 * execlist mode if this happens.)
 	 */
-	ret = wait_for(guc_ucode_response(dev_priv, &status), 100);
-
-	DRM_DEBUG_DRIVER("DMA status 0x%x, GuC status 0x%x\n",
-			I915_READ(DMA_CTRL), status);
+	ret = wait_for(guc_ready(guc, &status), 100);
+	DRM_DEBUG_DRIVER("GuC status %#x\n", status);
 
 	if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
 		DRM_ERROR("GuC firmware signature verification failed\n");
 		ret = -ENOEXEC;
 	}
 
-	DRM_DEBUG_DRIVER("returning %d\n", ret);
-
 	return ret;
 }
 
 /*
  * Load the GuC firmware blob into the MinuteIA.
  */
-static int guc_ucode_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
+static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
 {
 	struct intel_guc *guc = container_of(guc_fw, struct intel_guc, fw);
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -198,34 +242,24 @@ static int guc_ucode_xfer(struct intel_u
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-	/* Enable MIA caching. GuC clock gating is disabled. */
-	I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
-
-	/* WaDisableMinuteIaClockGating:bxt */
-	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
-		I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
-					      ~GUC_ENABLE_MIA_CLOCK_GATING));
-	}
-
-	/* WaC6DisallowByGfxPause:bxt */
-	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
-		I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
-
-	if (IS_GEN9_LP(dev_priv))
-		I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
-	else
-		I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
-
-	if (IS_GEN9(dev_priv)) {
-		/* DOP Clock Gating Enable for GuC clocks */
-		I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
-					    I915_READ(GEN7_MISCCPCTL)));
+	guc_prepare_xfer(guc);
 
-		/* allows for 5us (in 10ns units) before GT can go to RC6 */
-		I915_WRITE(GUC_ARAT_C6DIS, 0x1FF);
-	}
-
-	ret = guc_ucode_xfer_dma(dev_priv, vma);
+	/*
+	 * Note that GuC needs the CSS header plus uKernel code to be copied
+	 * by the DMA engine in one operation, whereas the RSA signature is
+	 * loaded via MMIO.
+	 */
+	ret = guc_xfer_rsa(guc, vma);
+	if (ret)
+		DRM_WARN("GuC firmware signature xfer error %d\n", ret);
+
+	ret = guc_xfer_ucode(guc, vma);
+	if (ret)
+		DRM_WARN("GuC firmware code xfer error %d\n", ret);
+
+	ret = guc_wait_ucode(guc);
+	if (ret)
+		DRM_ERROR("GuC firmware xfer error %d\n", ret);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
@@ -247,5 +281,5 @@ static int guc_ucode_xfer(struct intel_u
  */
 int intel_guc_fw_upload(struct intel_guc *guc)
 {
-	return intel_uc_fw_upload(&guc->fw, guc_ucode_xfer);
+	return intel_uc_fw_upload(&guc->fw, guc_fw_xfer);
 }