From f8526fb53358c5b6cd092a3284fa1027a99dd08b Mon Sep 17 00:00:00 2001
From: Ben Skeggs <bskeggs@redhat.com>
Date: Wed, 15 Jan 2020 06:34:22 +1000
Subject: drm/nouveau/flcn/qmgr: move sequence tracking from nvkm_msgqueue to
 nvkm_falcon_qmgr
Git-commit: 0ae59432ba6d647297f2e1bed97139147ce140ac
Patch-mainline: v5.6-rc1
References: jsc#SLE-12680, jsc#SLE-12880, jsc#SLE-12882, jsc#SLE-12883, jsc#SLE-13496, jsc#SLE-15322

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Signed-off-by: Patrik Jakobsson <pjakobsson@suse.de>
---
 drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c    |  4 +-
 drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c    | 10 +++--
 .../gpu/drm/nouveau/nvkm/falcon/msgqueue.c    |  5 ---
 .../gpu/drm/nouveau/nvkm/falcon/msgqueue.h    | 39 +-----------------
 drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c    | 10 ++++-
 drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h    | 40 ++++++++++++++++++-
 6 files changed, 55 insertions(+), 53 deletions(-)
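
Note (not part of the upstream change, ignored by git-am): after this patch, command
sequences are owned by the falcon's queue manager rather than by nvkm_msgqueue itself, so
callers go through the new nvkm_falcon_qmgr_seq_acquire()/nvkm_falcon_qmgr_seq_release()
helpers. A minimal sketch of the resulting call pattern, modelled directly on the cmdq.c
hunk below (command construction and submission elided):

	struct nvkm_msgqueue_seq *seq;

	/* Sequence slots now live in nvkm_falcon_qmgr, reached via queue->qmgr. */
	seq = nvkm_falcon_qmgr_seq_acquire(queue->qmgr);
	if (IS_ERR(seq))
		return PTR_ERR(seq);

	/* ... set up seq->callback / seq->completion and write the command ... */

	if (ret) {
		/* On failure, hand the slot back to the queue manager. */
		seq->state = SEQ_STATE_PENDING;
		nvkm_falcon_qmgr_seq_release(queue->qmgr, seq);
	}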

diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
index 8b7796df697a..1cf6453fdd70 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c
@@ -164,7 +164,7 @@ nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
 	if (IS_ERR(queue))
 		return PTR_ERR(queue);
 
-	seq = msgqueue_seq_acquire(priv);
+	seq = nvkm_falcon_qmgr_seq_acquire(queue->qmgr);
 	if (IS_ERR(seq))
 		return PTR_ERR(seq);
 
@@ -178,7 +178,7 @@ nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
 	ret = cmd_write(priv, cmd, queue);
 	if (ret) {
 		seq->state = SEQ_STATE_PENDING;
-		msgqueue_seq_release(priv, seq);
+		nvkm_falcon_qmgr_seq_release(queue->qmgr, seq);
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
index 7be610427eef..303f9faf3423 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c
@@ -134,12 +134,14 @@ msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
 }
 
 static int
-msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
+msgqueue_msg_handle(struct nvkm_msgqueue *priv,
+		    struct nvkm_falcon_msgq *msgq,
+		    struct nvkm_msgqueue_hdr *hdr)
 {
 	const struct nvkm_subdev *subdev = priv->falcon->owner;
 	struct nvkm_msgqueue_seq *seq;
 
-	seq = &priv->seq[hdr->seq_id];
+	seq = &msgq->qmgr->seq[hdr->seq_id];
 	if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
 		nvkm_error(subdev, "msg for unknown sequence %d", seq->id);
 		return -EINVAL;
@@ -153,7 +155,7 @@ msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
 	if (seq->completion)
 		complete(seq->completion);
 
-	msgqueue_seq_release(priv, seq);
+	nvkm_falcon_qmgr_seq_release(msgq->qmgr, seq);
 	return 0;
 }
 
@@ -211,7 +213,7 @@ nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv,
 			priv->init_msg_received = true;
 	} else {
 		while (msg_queue_read(priv, queue, hdr) > 0)
-			msgqueue_msg_handle(priv, hdr);
+			msgqueue_msg_handle(priv, queue, hdr);
 	}
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
index eb2af85f3e3c..db3e4a3489bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
@@ -136,13 +136,8 @@ nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *func,
 		   struct nvkm_falcon *falcon,
 		   struct nvkm_msgqueue *queue)
 {
-	int i;
-
 	queue->func = func;
 	queue->falcon = falcon;
-	mutex_init(&queue->seq_lock);
-	for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++)
-		queue->seq[i].id = i;
 
 	init_completion(&queue->init_done);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
index 6729a7b52742..4cfa6b21d3df 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
@@ -144,36 +144,6 @@ struct nvkm_msgqueue_queue {
 	u32 tail_reg;
 };
 
-/**
- * struct nvkm_msgqueue_seq - keep track of ongoing commands
- *
- * Every time a command is sent, a sequence is assigned to it so the
- * corresponding message can be matched. Upon receiving the message, a callback
- * can be called and/or a completion signaled.
- *
- * @id:		sequence ID
- * @state:	current state
- * @callback:	callback to call upon receiving matching message
- * @completion:	completion to signal after callback is called
- */
-struct nvkm_msgqueue_seq {
-	u16 id;
-	enum {
-		SEQ_STATE_FREE = 0,
-		SEQ_STATE_PENDING,
-		SEQ_STATE_USED,
-		SEQ_STATE_CANCELLED
-	} state;
-	nvkm_msgqueue_callback callback;
-	struct completion *completion;
-};
-
-/*
- * We can have an arbitrary number of sequences, but realistically we will
- * probably not use that much simultaneously.
- */
-#define NVKM_MSGQUEUE_NUM_SEQUENCES 16
-
 /**
  * struct nvkm_msgqueue - manage a command/message based FW on a falcon
  *
@@ -181,20 +151,13 @@ struct nvkm_msgqueue_seq {
  * @func:	implementation of the firmware to use
  * @init_msg_received:	whether the init message has already been received
  * @init_done:	whether all init is complete and commands can be processed
- * @seq_lock:	protects seq and seq_tbl
- * @seq:	sequences to match commands and messages
- * @seq_tbl:	bitmap of sequences currently in use
- */
+  */
 struct nvkm_msgqueue {
 	struct nvkm_falcon *falcon;
 	const struct nvkm_msgqueue_func *func;
 	u32 fw_version;
 	bool init_msg_received;
 	struct completion init_done;
-
-	struct mutex seq_lock;
-	struct nvkm_msgqueue_seq seq[NVKM_MSGQUEUE_NUM_SEQUENCES];
-	unsigned long seq_tbl[BITS_TO_LONGS(NVKM_MSGQUEUE_NUM_SEQUENCES)];
 };
 
 void nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *, struct nvkm_falcon *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
index f1469ecb7a4e..0cc192b55cc3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.c
@@ -23,7 +23,7 @@
 #include "qmgr.h"
 
 struct nvkm_msgqueue_seq *
-msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *priv)
 {
 	const struct nvkm_subdev *subdev = priv->falcon->owner;
 	struct nvkm_msgqueue_seq *seq;
@@ -46,7 +46,8 @@ msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
 }
 
 void
-msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq)
+nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *priv,
+			     struct nvkm_msgqueue_seq *seq)
 {
 	/* no need to acquire seq_lock since clear_bit is atomic */
 	seq->state = SEQ_STATE_FREE;
@@ -70,10 +71,15 @@ nvkm_falcon_qmgr_new(struct nvkm_falcon *falcon,
 		     struct nvkm_falcon_qmgr **pqmgr)
 {
 	struct nvkm_falcon_qmgr *qmgr;
+	int i;
 
 	if (!(qmgr = *pqmgr = kzalloc(sizeof(*qmgr), GFP_KERNEL)))
 		return -ENOMEM;
 
 	qmgr->falcon = falcon;
+	mutex_init(&qmgr->seq_lock);
+	for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++)
+		qmgr->seq[i].id = i;
+
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
index 7b89fdd0c13e..35ac2cc85d2c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h
@@ -9,12 +9,48 @@
 /* max size of the messages we can receive */
 #define MSG_BUF_SIZE 128
 
+/**
+ * struct nvkm_msgqueue_seq - keep track of ongoing commands
+ *
+ * Every time a command is sent, a sequence is assigned to it so the
+ * corresponding message can be matched. Upon receiving the message, a callback
+ * can be called and/or a completion signaled.
+ *
+ * @id:		sequence ID
+ * @state:	current state
+ * @callback:	callback to call upon receiving matching message
+ * @completion:	completion to signal after callback is called
+ */
+struct nvkm_msgqueue_seq {
+	u16 id;
+	enum {
+		SEQ_STATE_FREE = 0,
+		SEQ_STATE_PENDING,
+		SEQ_STATE_USED,
+		SEQ_STATE_CANCELLED
+	} state;
+	nvkm_msgqueue_callback callback;
+	struct completion *completion;
+};
+
+/*
+ * We can have an arbitrary number of sequences, but realistically we will
+ * probably not use that much simultaneously.
+ */
+#define NVKM_MSGQUEUE_NUM_SEQUENCES 16
+
 struct nvkm_falcon_qmgr {
 	struct nvkm_falcon *falcon;
+
+	struct mutex seq_lock;
+	struct nvkm_msgqueue_seq seq[NVKM_MSGQUEUE_NUM_SEQUENCES];
+	unsigned long seq_tbl[BITS_TO_LONGS(NVKM_MSGQUEUE_NUM_SEQUENCES)];
 };
 
-struct nvkm_msgqueue_seq *msgqueue_seq_acquire(struct nvkm_msgqueue *);
-void msgqueue_seq_release(struct nvkm_msgqueue *, struct nvkm_msgqueue_seq *);
+struct nvkm_msgqueue_seq *
+nvkm_falcon_qmgr_seq_acquire(struct nvkm_falcon_qmgr *);
+void nvkm_falcon_qmgr_seq_release(struct nvkm_falcon_qmgr *,
+				  struct nvkm_msgqueue_seq *);
 
 #define FLCNQ_PRINTK(t,q,f,a...)                                               \
        FLCN_PRINTK(t, (q)->qmgr->falcon, "%s: "f, (q)->name, ##a)
-- 
2.28.0