From: James Smart <jsmart2021@gmail.com>
Date: Mon, 16 Aug 2021 09:28:55 -0700
Subject: scsi: lpfc: Add rx monitoring statistics
Patch-mainline: v5.15-rc1
Git-commit: 17b27ac5922454ff7de91cbed458643608c36abc
References: bsc#1190576

The driver keeps watch over congestion management (CM) behavior by
maintaining a set of rx I/O statistics. This information is also used
later when updating the CM statistics buffer.

Link: https://lore.kernel.org/r/20210816162901.121235-11-jsmart2021@gmail.com
Co-developed-by: Justin Tee <justin.tee@broadcom.com>
Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Daniel Wagner <dwagner@suse.de>
---
 drivers/scsi/lpfc/lpfc.h      |   21 +++++++++++++++++
 drivers/scsi/lpfc/lpfc_init.c |   50 ++++++++++++++++++++++++++++++++++++++++++
 drivers/scsi/lpfc/lpfc_mem.c  |    4 +++
 drivers/scsi/lpfc/lpfc_scsi.c |    2 +
 drivers/scsi/lpfc/lpfc_sli.c  |   15 ++++++++++++
 5 files changed, 92 insertions(+)

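For reference, below is a minimal stand-alone sketch of the ring discipline
the timer uses: head is the next slot to be written, tail is the oldest valid
entry, and once the ring fills the oldest entry is overwritten. The rx_ring
and rx_sample names are simplified stand-ins for illustration, not the
driver's structures; the sketch only mirrors the head/tail advance added to
lpfc_cmf_timer() further down.

/*
 * Minimal sketch of the rx-monitor ring discipline (illustrative types,
 * not the driver's).  Mirrors the head/tail advance in lpfc_cmf_timer().
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES 800		/* mirrors LPFC_MAX_RXMONITOR_ENTRY */

struct rx_sample {			/* trimmed-down rxtable_entry */
	uint64_t total_bytes;
	uint64_t rcv_bytes;
	uint32_t io_cnt;
};

struct rx_ring {
	struct rx_sample slot[RING_ENTRIES];
	atomic_uint head;		/* next slot the timer writes */
	atomic_uint tail;		/* oldest valid slot */
};

/* Called once per timer interval: store a sample and advance head;
 * when the ring is full, the oldest entry (tail) is overwritten.
 */
static void rx_ring_push(struct rx_ring *r, const struct rx_sample *s)
{
	unsigned int head = atomic_load(&r->head);
	unsigned int tail;

	r->slot[head] = *s;

	head = (head + 1) % RING_ENTRIES;
	tail = atomic_load(&r->tail);
	if (head == tail)		/* ring full: drop the oldest sample */
		atomic_store(&r->tail, (tail + 1) % RING_ENTRIES);
	atomic_store(&r->head, head);
}

int main(void)
{
	static struct rx_ring ring;
	struct rx_sample s = { .total_bytes = 4096, .rcv_bytes = 4096, .io_cnt = 1 };

	/* more pushes than slots, so tail is forced to move */
	for (int i = 0; i < 1000; i++)
		rx_ring_push(&ring, &s);

	printf("head=%u tail=%u\n",
	       atomic_load(&ring.head), atomic_load(&ring.tail));
	return 0;
}

With this scheme the ring retains the most recent LPFC_MAX_RXMONITOR_ENTRY
completed timer intervals for later dumping.
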
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1447,6 +1447,12 @@ struct lpfc_hba {
 	u32 cgn_sig_freq;
 	u32 cgn_acqe_cnt;
 
+	/* RX monitor handling for CMF */
+	struct rxtable_entry *rxtable;  /* RX_monitor information */
+	atomic_t rxtable_idx_head;
+#define LPFC_RXMONITOR_TABLE_IN_USE     (LPFC_MAX_RXMONITOR_ENTRY + 73)
+	atomic_t rxtable_idx_tail;
+	atomic_t rx_max_read_cnt;       /* Maximum read bytes */
 	uint64_t rx_block_cnt;
 
 	/* Congestion parameters from flash */
@@ -1492,6 +1498,21 @@ struct lpfc_hba {
 	struct dbg_log_ent dbg_log[DBG_LOG_SZ];
 };
 
+#define LPFC_MAX_RXMONITOR_ENTRY	800
+struct rxtable_entry {
+	uint64_t total_bytes;   /* Total no of read bytes requested */
+	uint64_t rcv_bytes;     /* Total no of read bytes completed */
+	uint64_t avg_io_size;
+	uint64_t avg_io_latency;/* Average io latency in microseconds */
+	uint64_t max_read_cnt;  /* Maximum read bytes */
+	uint64_t max_bytes_per_interval;
+	uint32_t cmf_busy;
+	uint32_t cmf_info;      /* CMF_SYNC_WQE info */
+	uint32_t io_cnt;
+	uint32_t timer_utilization;
+	uint32_t timer_interval;
+};
+
 static inline struct Scsi_Host *
 lpfc_shost_from_vport(struct lpfc_vport *vport)
 {
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -5358,9 +5358,13 @@ lpfc_cmf_timer(struct hrtimer *timer)
 {
 	struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
 					     cmf_timer);
+	struct rxtable_entry *entry;
 	uint32_t io_cnt;
+	uint32_t head, tail;
+	uint32_t busy, max_read;
 	uint64_t total, rcv, lat, mbpi;
 	int timer_interval = LPFC_CMF_INTERVAL;
+	uint32_t ms;
 	struct lpfc_cgn_stat *cgs;
 	int cpu;
 
@@ -5385,6 +5389,14 @@ lpfc_cmf_timer(struct hrtimer *timer)
 	 */
 	atomic_set(&phba->cmf_stop_io, 1);
 
+	/* First we need to calculate the actual ms between
+	 * the last timer interrupt and this one. We ask for
+	 * LPFC_CMF_INTERVAL, however the actual time may
+	 * vary depending on system overhead.
+	 */
+	ms = lpfc_calc_cmf_latency(phba);
+
+
 	/* Immediately after we calculate the time since the last
 	 * timer interrupt, set the start time for the next
 	 * interrupt
@@ -5431,6 +5443,8 @@ lpfc_cmf_timer(struct hrtimer *timer)
 		atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
 		atomic64_add(lat, &phba->cgn_latency_evt);
 	}
+	busy = atomic_xchg(&phba->cmf_busy, 0);
+	max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
 
 	/* Calculate MBPI for the next timer interval */
 	if (mbpi) {
@@ -5445,6 +5459,42 @@ lpfc_cmf_timer(struct hrtimer *timer)
 			phba->cmf_max_bytes_per_interval = mbpi;
 	}
 
+	/* Save rxmonitor information for debug */
+	if (phba->rxtable) {
+		head = atomic_xchg(&phba->rxtable_idx_head,
+				   LPFC_RXMONITOR_TABLE_IN_USE);
+		entry = &phba->rxtable[head];
+		entry->total_bytes = total;
+		entry->rcv_bytes = rcv;
+		entry->cmf_busy = busy;
+		entry->cmf_info = phba->cmf_active_info;
+		if (io_cnt) {
+			entry->avg_io_latency = div_u64(lat, io_cnt);
+			entry->avg_io_size = div_u64(rcv, io_cnt);
+		} else {
+			entry->avg_io_latency = 0;
+			entry->avg_io_size = 0;
+		}
+		entry->max_read_cnt = max_read;
+		entry->io_cnt = io_cnt;
+		entry->max_bytes_per_interval = mbpi;
+		if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
+			entry->timer_utilization = phba->cmf_last_ts;
+		else
+			entry->timer_utilization = ms;
+		entry->timer_interval = ms;
+		phba->cmf_last_ts = 0;
+
+		/* Increment rxtable index */
+		head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
+		tail = atomic_read(&phba->rxtable_idx_tail);
+		if (head == tail) {
+			tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
+			atomic_set(&phba->rxtable_idx_tail, tail);
+		}
+		atomic_set(&phba->rxtable_idx_head, head);
+	}
+
 	if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
 		/* If Monitor mode, check if we are oversubscribed
 		 * against the full line rate.
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -347,6 +347,10 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
 		phba->cgn_i = NULL;
 	}
 
+	/* Free RX table */
+	kfree(phba->rxtable);
+	phba->rxtable = NULL;
+
 	/* Free the iocb lookup array */
 	kfree(psli->iocbq_lookup);
 	psli->iocbq_lookup = NULL;
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3990,6 +3990,8 @@ lpfc_update_cmf_cmd(struct lpfc_hba *phb
 			atomic_inc(&phba->cmf_busy);
 			return -EBUSY;
 		}
+		if (size > atomic_read(&phba->rx_max_read_cnt))
+			atomic_set(&phba->rx_max_read_cnt, size);
 	}
 
 	cgs = this_cpu_ptr(phba->cmf_stat);
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -7940,6 +7940,21 @@ lpfc_cmf_setup(struct lpfc_hba *phba)
 	atomic64_set(&phba->cgn_latency_evt, 0);
 
 	phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
+
+	/* Allocate RX Monitor Buffer */
+	if (!phba->rxtable) {
+		phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
+					      sizeof(struct rxtable_entry),
+					      GFP_KERNEL);
+		if (!phba->rxtable) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2644 Failed to alloc memory "
+					"for RX Monitor Buffer\n");
+			return -ENOMEM;
+		}
+	}
+	atomic_set(&phba->rxtable_idx_head, 0);
+	atomic_set(&phba->rxtable_idx_tail, 0);
 	return 0;
 }
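
A note on the head sentinel: while lpfc_cmf_timer() fills an entry it parks
rxtable_idx_head at LPFC_RXMONITOR_TABLE_IN_USE (a value outside the valid
0..LPFC_MAX_RXMONITOR_ENTRY-1 range) and restores it only after the indices
have been updated, so a consumer can detect an in-progress update and retry.
The reader side is not added by this patch; the following is a hypothetical,
simplified userspace sketch of that handshake (the tail handling shown in the
earlier sketch is omitted), not the driver's actual consumer code.

/*
 * Hypothetical sketch of the LPFC_RXMONITOR_TABLE_IN_USE handshake, outside
 * the driver: the writer (timer) parks the head index at a sentinel while it
 * fills the current slot; a reader that sees the sentinel retries.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRIES	800
#define IN_USE	(ENTRIES + 73)		/* mirrors LPFC_RXMONITOR_TABLE_IN_USE */

static _Atomic uint64_t slot[ENTRIES];
static atomic_uint head;		/* next slot to write, or IN_USE */

static void *writer(void *arg)
{
	(void)arg;
	for (uint64_t i = 0; i < 10000; i++) {
		/* take ownership: swap head for the sentinel */
		unsigned int h = atomic_exchange(&head, IN_USE);

		slot[h] = i;				/* fill the entry */
		atomic_store(&head, (h + 1) % ENTRIES);	/* publish new head */
	}
	return NULL;
}

static void *reader(void *arg)
{
	uint64_t sum = 0;

	(void)arg;
	for (int i = 0; i < 1000; i++) {
		unsigned int h, newest;

		do {			/* writer mid-update: retry */
			h = atomic_load(&head);
		} while (h == IN_USE);

		/* newest completed entry sits just behind head */
		newest = (h + ENTRIES - 1) % ENTRIES;
		sum += slot[newest];
	}
	printf("reader saw sum=%llu\n", (unsigned long long)sum);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	printf("final head=%u\n", atomic_load(&head));
	return 0;
}

In effect the sentinel acts as a lightweight lock: the single writer never
blocks, and a reader only spins for the short window in which an entry is
being filled.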