From: Intiyaz Basha <intiyaz.basha@cavium.com>
Date: Mon, 6 Aug 2018 13:09:40 -0700
Subject: liquidio: avoided acquiring post_lock for data only queues
Patch-mainline: v4.19-rc1
Git-commit: 7395a8845588ebdf170f4d3796d44142458741be
References: bsc#1119406 FATE#326562

All control commands (soft commands) go through only Queue 0
(the control and data queue), so only queue 0 needs post_lock;
the other queues are data-only and do not need post_lock.

Added a flag to indicate whether a queue can be used for soft
commands.

If this flag is set, post_lock must be acquired before posting
a command to the queue.
If this flag is clear, post_lock is not initialized and must not
be taken for the queue.
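
For illustration only, here is a minimal userspace model of the
resulting posting discipline. This is not driver code: the sketch_*
names are invented for this note, and a pthread mutex stands in for
the kernel spinlock.

  /* build: cc -pthread sketch.c */
  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct sketch_iq {
          pthread_mutex_t post_lock;      /* stands in for the spinlock */
          bool allow_soft_cmds;           /* set only for queue 0 */
          unsigned int posted;
  };

  static void sketch_init_iq(struct sketch_iq *iq, int iq_no)
  {
          /* Only queue 0 carries soft commands, so only queue 0
           * gets a usable post_lock; data-only queues skip it.
           */
          iq->allow_soft_cmds = (iq_no == 0);
          if (iq->allow_soft_cmds)
                  pthread_mutex_init(&iq->post_lock, NULL);
          iq->posted = 0;
  }

  static void sketch_send_command(struct sketch_iq *iq)
  {
          if (iq->allow_soft_cmds)
                  pthread_mutex_lock(&iq->post_lock);

          iq->posted++;            /* stands in for __post_command2() */

          if (iq->allow_soft_cmds)
                  pthread_mutex_unlock(&iq->post_lock);
  }

  int main(void)
  {
          struct sketch_iq q0, q1;

          sketch_init_iq(&q0, 0);  /* control + data: locked posting */
          sketch_init_iq(&q1, 1);  /* data only: lock-free posting */
          sketch_send_command(&q0);
          sketch_send_command(&q1);
          printf("q0 posted %u, q1 posted %u\n", q0.posted, q1.posted);
          return 0;
  }

The point of the pattern is that the branch on allow_soft_cmds is
set once at init time, so data-only queues pay no locking cost on
the hot posting path.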

Signed-off-by: Intiyaz Basha <intiyaz.basha@cavium.com>
Signed-off-by: Felix Manlunas <felix.manlunas@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/cavium/liquidio/octeon_iq.h       |   10 +++++++
 drivers/net/ethernet/cavium/liquidio/request_manager.c |   22 ++++++++++++++---
 2 files changed, 29 insertions(+), 3 deletions(-)

--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -82,6 +82,16 @@ struct octeon_instr_queue {
 	/** A spinlock to protect while posting on the ring.  */
 	spinlock_t post_lock;
 
+	/** This flag indicates whether the queue can be used for soft
+	 *  commands.  If this flag is set, post_lock must be acquired
+	 *  before posting a command to the queue.  If this flag is
+	 *  clear, post_lock is not initialized for the queue.
+	 *  All control commands (soft commands) go through only Queue 0
+	 *  (the control and data queue), so only queue 0 needs post_lock;
+	 *  the other queues are data-only and do not need post_lock.
+	 */
+	bool allow_soft_cmds;
+
 	u32 pkt_in_done;
 
 	/** A spinlock to protect access to the input ring.*/
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -125,7 +125,12 @@ int octeon_init_instr_queue(struct octeo
 
 	/* Initialize the spinlock for this instruction queue */
 	spin_lock_init(&iq->lock);
-	spin_lock_init(&iq->post_lock);
+	if (iq_no == 0) {
+		iq->allow_soft_cmds = true;
+		spin_lock_init(&iq->post_lock);
+	} else {
+		iq->allow_soft_cmds = false;
+	}
 
 	spin_lock_init(&iq->iq_flush_running_lock);
 
@@ -565,7 +570,8 @@ octeon_send_command(struct octeon_device
 	/* Get the lock and prevent other tasks and tx interrupt handler from
 	 * running.
 	 */
-	spin_lock_bh(&iq->post_lock);
+	if (iq->allow_soft_cmds)
+		spin_lock_bh(&iq->post_lock);
 
 	st = __post_command2(iq, cmd);
 
@@ -582,7 +588,8 @@ octeon_send_command(struct octeon_device
 		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
 	}
 
-	spin_unlock_bh(&iq->post_lock);
+	if (iq->allow_soft_cmds)
+		spin_unlock_bh(&iq->post_lock);
 
 	/* This is only done here to expedite packets being flushed
 	 * for cases where there are no IQ completion interrupts.
@@ -701,11 +708,20 @@ octeon_prepare_soft_command(struct octeo
 int octeon_send_soft_command(struct octeon_device *oct,
 			     struct octeon_soft_command *sc)
 {
+	struct octeon_instr_queue *iq;
 	struct octeon_instr_ih2 *ih2;
 	struct octeon_instr_ih3 *ih3;
 	struct octeon_instr_irh *irh;
 	u32 len;
 
+	iq = oct->instr_queue[sc->iq_no];
+	if (!iq->allow_soft_cmds) {
+		dev_err(&oct->pci_dev->dev, "Soft commands are not allowed on Queue %d\n",
+			sc->iq_no);
+		INCR_INSTRQUEUE_PKT_COUNT(oct, sc->iq_no, instr_dropped, 1);
+		return IQ_SEND_FAILED;
+	}
+
 	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
 		ih3 =  (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
 		if (ih3->dlengsz) {