From 805446c8347c9e743912cb7acf795683d9af7972 Mon Sep 17 00:00:00 2001
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Date: Wed, 27 Mar 2019 14:23:28 +0000
Subject: [PATCH] drm/i915: Introduce concept of a sub-platform
Mime-version: 1.0
Content-type: text/plain; charset=UTF-8
Content-transfer-encoding: 8bit
Git-commit: 805446c8347c9e743912cb7acf795683d9af7972
Patch-mainline: v5.2-rc1
References: jsc#SLE-4986

The concept of a sub-platform already exists in our code (such as the
ULX and ULT platform variants), implemented via macros which check a
list of device ids to determine a match.

With this patch we consolidate the device id checks into a single
function called during early driver load.
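
A standalone user space sketch of that lookup pattern (the ids are the
SKL ULT ones visible in the macros removed below; 0x1916 is just an
example query):

  #include <stdbool.h>
  #include <stdio.h>

  /* SKL ULT ids, as listed in the IS_SKL_ULT() macro removed below. */
  static const unsigned short subplatform_ult_ids[] = {
          0x1906, 0x1913, 0x1916, 0x1921, 0x1926
  };

  static bool find_devid(unsigned short id, const unsigned short *p,
                         unsigned int num)
  {
          for (; num; num--, p++) {
                  if (*p == id)
                          return true;
          }
          return false;
  }

  int main(void)
  {
          unsigned short devid = 0x1916; /* example PCI device id */

          if (find_devid(devid, subplatform_ult_ids,
                         sizeof(subplatform_ult_ids) /
                         sizeof(subplatform_ult_ids[0])))
                  printf("0x%04x is an ULT part\n", devid);
          return 0;
  }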

A few low bits in the platform mask are reserved for sub-platform
identification and defined as a per-platform namespace.
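
For illustration, the word index and bit position of a platform follow
directly from that split; a standalone sketch mirroring the
__platform_mask_index()/__platform_mask_bit() helpers added below (the
enum value 40 is made up):

  #include <stdio.h>

  #define SUBPLATFORM_BITS 3              /* low bits reserved per mask word */
  #define PLATFORM_BITS    (32 - SUBPLATFORM_BITS)

  int main(void)
  {
          unsigned int platform = 40;     /* made-up enum intel_platform value */

          /* Which u32 word of platform_mask[] holds this platform ... */
          unsigned int index = platform / PLATFORM_BITS;
          /* ... and which bit in it, above the reserved sub-platform bits. */
          unsigned int bit = platform % PLATFORM_BITS + SUBPLATFORM_BITS;

          printf("platform %u -> platform_mask[%u], bit %u\n",
                 platform, index, bit);
          return 0;
  }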

At the same time it future-proofs the platform_mask handling by
preparing the code for easy extension, and tidies the very verbose WARN
strings generated when the IS_PLATFORM macros are embedded into WARN
type statements.
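
The new sub-platform check folds the platform bit and sub-platform bit
tests into one expression by shifting both up to the MSB; a minimal
standalone sketch of that trick, with made-up bit positions:

  #include <stdio.h>

  #define SUBPLATFORM_BITS 3
  #define MSB 31                          /* top bit of a 32-bit mask word */

  static int is_subplatform(unsigned int mask, unsigned int pb, unsigned int s)
  {
          /* Shift both candidate bits up to the MSB so one AND tests them. */
          return (((mask << (MSB - pb)) & (mask << (MSB - s))) & (1u << MSB)) != 0;
  }

  int main(void)
  {
          /* Made-up mask word: platform bit 14 and ULT bit 0 set. */
          unsigned int mask = (1u << 14) | (1u << 0);

          printf("ULT: %d\n", is_subplatform(mask, 14, 0));   /* prints 1 */
          printf("ULX: %d\n", is_subplatform(mask, 14, 1));   /* prints 0 */
          return 0;
  }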

V2: Fixed IS_SUBPLATFORM. Updated commit msg.
V3: Chris was right, there is an ordering problem.

V4:
 * Catch-up with new sub-platforms.
 * Rebase for RUNTIME_INFO.
 * Drop subplatform mask union tricks and convert platform_mask to an
   array for extensibility.

V5:
 * Fix subplatform check.
 * Protect against forgetting to expand subplatform bits.
 * Remove platform enum tallying.
 * Add subplatform to error state. (Chris)
 * Drop macros and just use static inlines.
 * Remove redundant IRONLAKE_M. (Ville)

V6:
 * Split out Ironlake change.
 * Optimize subplatform check.
 * Use __always_inline. (Lucas)
 * Add platform_mask comment. (Paulo)
 * Pass stored runtime info in error capture. (Chris)

V7:
 * Rebased for new AML ULX device id.
 * Bump platform mask array size for EHL.
 * Stop mentioning device ids in intel_device_info_subplatform_init by
   using the trick of splitting the macros in i915_pciids.h. (Jani)
 * AML seems to be either a subplatform of KBL or CFL so express it like
   that.

V8: * Use one device id table per subplatform. (Jani)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Jose Souza <jose.souza@intel.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190327142328.31780-1-tvrtko.ursulin@linux.intel.com
Acked-by: Takashi Iwai <tiwai@suse.de>

---
 drivers/gpu/drm/i915/i915_drv.c          |    4 -
 drivers/gpu/drm/i915/i915_drv.h          |  121 ++++++++++++++++++++++---------
 drivers/gpu/drm/i915/i915_gpu_error.c    |    3 
 drivers/gpu/drm/i915/i915_pci.c          |    2 
 drivers/gpu/drm/i915/intel_device_info.c |   93 +++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_device_info.h |   27 ++++++
 6 files changed, 211 insertions(+), 39 deletions(-)

--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -881,6 +881,8 @@ static int i915_driver_init_early(struct
 	if (i915_inject_load_failure())
 		return -ENODEV;
 
+	intel_device_info_subplatform_init(dev_priv);
+
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->gpu_error.lock);
 	mutex_init(&dev_priv->backlight_lock);
@@ -1346,8 +1348,6 @@ i915_driver_create(struct pci_dev *pdev,
 	memcpy(device_info, match_info, sizeof(*device_info));
 	RUNTIME_INFO(i915)->device_id = pdev->device;
 
-	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
-		     sizeof(device_info->platform_mask) * BITS_PER_BYTE);
 	BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
 
 	return i915;
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2370,7 +2370,67 @@ intel_info(const struct drm_i915_private
 #define IS_REVID(p, since, until) \
 	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
 
-#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))
+static __always_inline unsigned int
+__platform_mask_index(const struct intel_runtime_info *info,
+		      enum intel_platform p)
+{
+	const unsigned int pbits =
+		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
+
+	/* Expand the platform_mask array if this fails. */
+	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
+		     pbits * ARRAY_SIZE(info->platform_mask));
+
+	return p / pbits;
+}
+
+static __always_inline unsigned int
+__platform_mask_bit(const struct intel_runtime_info *info,
+		    enum intel_platform p)
+{
+	const unsigned int pbits =
+		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
+
+	return p % pbits + INTEL_SUBPLATFORM_BITS;
+}
+
+static inline u32
+intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
+{
+	const unsigned int pi = __platform_mask_index(info, p);
+
+	return info->platform_mask[pi] & (BIT(INTEL_SUBPLATFORM_BITS) - 1);
+}
+
+static __always_inline bool
+IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
+{
+	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
+	const unsigned int pi = __platform_mask_index(info, p);
+	const unsigned int pb = __platform_mask_bit(info, p);
+
+	BUILD_BUG_ON(!__builtin_constant_p(p));
+
+	return info->platform_mask[pi] & BIT(pb);
+}
+
+static __always_inline bool
+IS_SUBPLATFORM(const struct drm_i915_private *i915,
+	       enum intel_platform p, unsigned int s)
+{
+	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
+	const unsigned int pi = __platform_mask_index(info, p);
+	const unsigned int pb = __platform_mask_bit(info, p);
+	const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
+	const u32 mask = info->platform_mask[pi];
+
+	BUILD_BUG_ON(!__builtin_constant_p(p));
+	BUILD_BUG_ON(!__builtin_constant_p(s));
+	BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);
+
+	/* Shift and test on the MSB position so sign flag can be used. */
+	return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
+}
 
 #define IS_MOBILE(dev_priv)	(INTEL_INFO(dev_priv)->is_mobile)
 
@@ -2408,41 +2468,30 @@ intel_info(const struct drm_i915_private
 #define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
 				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
-#define IS_BDW_ULT(dev_priv)	(IS_BROADWELL(dev_priv) && \
-				 ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 ||	\
-				 (INTEL_DEVID(dev_priv) & 0xf) == 0xb ||	\
-				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
-/* ULX machines are also considered ULT. */
-#define IS_BDW_ULX(dev_priv)	(IS_BROADWELL(dev_priv) && \
-				 (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
+#define IS_BDW_ULT(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
+#define IS_BDW_ULX(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
 #define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
 				 (dev_priv)->info.gt == 3)
-#define IS_HSW_ULT(dev_priv)	(IS_HASWELL(dev_priv) && \
-				 (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
+#define IS_HSW_ULT(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
 #define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
 				 (dev_priv)->info.gt == 3)
 /* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x0A0E || \
-				 INTEL_DEVID(dev_priv) == 0x0A1E)
-#define IS_SKL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x1906 || \
-				 INTEL_DEVID(dev_priv) == 0x1913 || \
-				 INTEL_DEVID(dev_priv) == 0x1916 || \
-				 INTEL_DEVID(dev_priv) == 0x1921 || \
-				 INTEL_DEVID(dev_priv) == 0x1926)
-#define IS_SKL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x190E || \
-				 INTEL_DEVID(dev_priv) == 0x1915 || \
-				 INTEL_DEVID(dev_priv) == 0x191E)
-#define IS_KBL_ULT(dev_priv)	(INTEL_DEVID(dev_priv) == 0x5906 || \
-				 INTEL_DEVID(dev_priv) == 0x5913 || \
-				 INTEL_DEVID(dev_priv) == 0x5916 || \
-				 INTEL_DEVID(dev_priv) == 0x5921 || \
-				 INTEL_DEVID(dev_priv) == 0x5926)
-#define IS_KBL_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x590E || \
-				 INTEL_DEVID(dev_priv) == 0x5915 || \
-				 INTEL_DEVID(dev_priv) == 0x591E)
-#define IS_AML_ULX(dev_priv)	(INTEL_DEVID(dev_priv) == 0x591C || \
-				 INTEL_DEVID(dev_priv) == 0x87C0 || \
-				 INTEL_DEVID(dev_priv) == 0x87CA)
+#define IS_HSW_ULX(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
+#define IS_SKL_ULT(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_SKL_ULX(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_KBL_ULT(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_KBL_ULX(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_AML_ULX(dev_priv) \
+	(IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_AML) || \
+	 IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_AML))
 #define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
 				 (dev_priv)->info.gt == 2)
 #define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
@@ -2453,14 +2502,16 @@ intel_info(const struct drm_i915_private
 				 (dev_priv)->info.gt == 2)
 #define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
 				 (dev_priv)->info.gt == 3)
-#define IS_CFL_ULT(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
-				 (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
+#define IS_CFL_ULT(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
 #define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
 				 (dev_priv)->info.gt == 2)
 #define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
 				 (dev_priv)->info.gt == 3)
-#define IS_CNL_WITH_PORT_F(dev_priv)   (IS_CANNONLAKE(dev_priv) && \
-					(INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
+#define IS_CNL_WITH_PORT_F(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
+#define IS_ICL_WITH_PORT_F(dev_priv) \
+	IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
 
 #define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
 
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -681,6 +681,9 @@ int i915_error_state_to_str(struct drm_i
 	err_printf(m, "Reset count: %u\n", error->reset_count);
 	err_printf(m, "Suspend count: %u\n", error->suspend_count);
 	err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
+	err_printf(m, "Subplatform: 0x%x\n",
+		   intel_subplatform(&error->runtime_info,
+				     error->device_info.platform));
 	err_print_pciid(m, error->i915);
 
 	err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -29,7 +29,7 @@
 #include "i915_drv.h"
 #include "i915_selftest.h"
 
-#define PLATFORM(x) .platform = (x), .platform_mask = BIT(x)
+#define PLATFORM(x) .platform = (x)
 #define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
 
 #define GEN_DEFAULT_PIPEOFFSETS \
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -718,6 +718,99 @@ static u32 read_timestamp_frequency(stru
 	return 0;
 }
 
+#undef INTEL_VGA_DEVICE
+#define INTEL_VGA_DEVICE(id, info) (id)
+
+static const u16 subplatform_ult_ids[] = {
+	INTEL_HSW_ULT_GT1_IDS(0),
+	INTEL_HSW_ULT_GT2_IDS(0),
+	INTEL_HSW_ULT_GT3_IDS(0),
+	INTEL_BDW_ULT_GT1_IDS(0),
+	INTEL_BDW_ULT_GT2_IDS(0),
+	INTEL_BDW_ULT_GT3_IDS(0),
+	INTEL_BDW_ULT_RSVD_IDS(0),
+	INTEL_SKL_ULT_GT1_IDS(0),
+	INTEL_SKL_ULT_GT2_IDS(0),
+	INTEL_SKL_ULT_GT3_IDS(0),
+	INTEL_KBL_ULT_GT1_IDS(0),
+	INTEL_KBL_ULT_GT2_IDS(0),
+	INTEL_KBL_ULT_GT3_IDS(0),
+	INTEL_CFL_U_GT2_IDS(0),
+	INTEL_CFL_U_GT3_IDS(0),
+	INTEL_WHL_U_GT1_IDS(0),
+	INTEL_WHL_U_GT2_IDS(0),
+	INTEL_WHL_U_GT3_IDS(0)
+};
+
+static const u16 subplatform_ulx_ids[] = {
+	INTEL_HSW_ULX_GT1_IDS(0),
+	INTEL_HSW_ULX_GT2_IDS(0),
+	INTEL_BDW_ULX_GT1_IDS(0),
+	INTEL_BDW_ULX_GT2_IDS(0),
+	INTEL_BDW_ULX_GT3_IDS(0),
+	INTEL_BDW_ULX_RSVD_IDS(0),
+	INTEL_SKL_ULX_GT1_IDS(0),
+	INTEL_SKL_ULX_GT2_IDS(0),
+	INTEL_KBL_ULX_GT1_IDS(0),
+	INTEL_KBL_ULX_GT2_IDS(0)
+};
+
+static const u16 subplatform_aml_ids[] = {
+	INTEL_AML_KBL_GT2_IDS(0),
+	INTEL_AML_CFL_GT2_IDS(0)
+};
+
+static const u16 subplatform_portf_ids[] = {
+	INTEL_CNL_PORT_F_IDS(0),
+	INTEL_ICL_PORT_F_IDS(0)
+};
+
+static bool find_devid(u16 id, const u16 *p, unsigned int num)
+{
+	for (; num; num--, p++) {
+		if (*p == id)
+			return true;
+	}
+
+	return false;
+}
+
+void intel_device_info_subplatform_init(struct drm_i915_private *i915)
+{
+	const struct intel_device_info *info = INTEL_INFO(i915);
+	const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
+	const unsigned int pi = __platform_mask_index(rinfo, info->platform);
+	const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
+	u16 devid = INTEL_DEVID(i915);
+	u32 mask = 0;
+
+	/* Make sure IS_<platform> checks are working. */
+	RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);
+
+	/* Find and mark subplatform bits based on the PCI device id. */
+	if (find_devid(devid, subplatform_ult_ids,
+		       ARRAY_SIZE(subplatform_ult_ids))) {
+		mask = BIT(INTEL_SUBPLATFORM_ULT);
+	} else if (find_devid(devid, subplatform_ulx_ids,
+			      ARRAY_SIZE(subplatform_ulx_ids))) {
+		mask = BIT(INTEL_SUBPLATFORM_ULX);
+		if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+			/* ULX machines are also considered ULT. */
+			mask |= BIT(INTEL_SUBPLATFORM_ULT);
+		}
+	} else if (find_devid(devid, subplatform_aml_ids,
+			      ARRAY_SIZE(subplatform_aml_ids))) {
+		mask = BIT(INTEL_SUBPLATFORM_AML);
+	} else if (find_devid(devid, subplatform_portf_ids,
+			      ARRAY_SIZE(subplatform_portf_ids))) {
+		mask = BIT(INTEL_SUBPLATFORM_PORTF);
+	}
+
+	GEM_BUG_ON(mask & ~(BIT(INTEL_SUBPLATFORM_BITS) - 1));
+
+	RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
+}
+
 /**
  * intel_device_info_runtime_init - initialize runtime info
  * @info: intel device info struct
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -74,6 +74,21 @@ enum intel_platform {
 	INTEL_MAX_PLATFORMS
 };
 
+/*
+ * Subplatform bits share the same namespace per parent platform. In other words
+ * it is fine for the same bit to be used on multiple parent platforms.
+ */
+
+#define INTEL_SUBPLATFORM_BITS (3)
+
+/* HSW/BDW/SKL/KBL/CFL */
+#define INTEL_SUBPLATFORM_ULT	(0)
+#define INTEL_SUBPLATFORM_ULX	(1)
+#define INTEL_SUBPLATFORM_AML	(2)
+
+/* CNL/ICL */
+#define INTEL_SUBPLATFORM_PORTF	(0)
+
 #define DEV_INFO_FOR_EACH_FLAG(func) \
 	func(is_mobile); \
 	func(is_lp); \
@@ -150,7 +165,6 @@ struct intel_device_info {
 	intel_ring_mask_t ring_mask; /* Rings supported by the HW */
 
 	enum intel_platform platform;
-	u32 platform_mask;
 
 	unsigned int page_sizes; /* page sizes supported by the HW */
 
@@ -176,6 +190,16 @@ struct intel_device_info {
 };
 
 struct intel_runtime_info {
+	/*
+	 * Platform mask is used for optimizing or-ed IS_PLATFORM calls into
+	 * single runtime conditionals, and also to provide groundwork for
+	 * future per platform, or per SKU build optimizations.
+	 *
+	 * Array can be extended when necessary if the corresponding
+	 * BUILD_BUG_ON is hit.
+	 */
+	u32 platform_mask[2];
+
 	u16 device_id;
 
 	u8 num_sprites[I915_MAX_PIPES];
@@ -247,6 +271,7 @@ static inline void sseu_set_eus(struct s
 
 const char *intel_platform_name(enum intel_platform platform);
 
+void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
 void intel_device_info_runtime_init(struct intel_device_info *info);
 void intel_device_info_dump(const struct intel_device_info *info,
 			    struct drm_printer *p);