From: Enzo Matsumiya <ematsumiya@suse.de>
Subject: kabi: nvme: fix fast_io_fail_tmo
Patch-mainline: Never, kabi fix
References: bsc#1181161

Fix the kABI breakage introduced by the nvme fast_io_fail_tmo patch
patches.suse/nvme-fabrics-reject-I-O-to-offline-device.patch

That patch added new members to struct nvme_ctrl and struct
nvmf_ctrl_options, which changes the CRCs genksyms computes for every
exported symbol whose signature involves those structures, and (for
the members added mid-structure) shifts the offsets of the members
that follow. Fix this by moving the new nvme_ctrl members, together
with the previously guarded fabrics_q, to the end of the structure,
and by hiding every addition behind #ifndef __GENKSYMS__: genksyms
keeps seeing the old definitions, so the CRCs stay stable, while
placing the additions at the end keeps the offsets of all
pre-existing members unchanged for already-built modules.
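As an illustration, the pattern used throughout this patch looks like
this (hypothetical structure and member names, not part of the patch
itself):

    struct example_ctrl {
            int existing;   /* offset unchanged for old modules */
    #ifndef __GENKSYMS__
            int added;      /* appended; invisible to genksyms */
    #endif
    };

__GENKSYMS__ is defined only while genksyms parses the sources to
compute symbol CRCs, never during the actual compile, so the guarded
members and statements are always built into the running kernel;
genksyms simply checksums the definitions as if they did not exist.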

--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -63,7 +63,9 @@ enum {
 	NVMF_OPT_DISABLE_SQFLOW = 1 << 14,
 	NVMF_OPT_HDR_DIGEST	= 1 << 15,
 	NVMF_OPT_DATA_DIGEST	= 1 << 16,
+#ifndef __GENKSYMS__
 	NVMF_OPT_FAIL_FAST_TMO  = 1 << 20,
+#endif
 };
 
 /**
@@ -111,7 +113,9 @@ struct nvmf_ctrl_options {
 	bool			disable_sqflow;
 	bool			hdr_digest;
 	bool			data_digest;
+#ifndef __GENKSYMS__
 	int			fast_io_fail_tmo;
+#endif
 };
 
 /*
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -165,9 +165,6 @@ struct nvme_ctrl {
 	const struct nvme_ctrl_ops *ops;
 	struct request_queue *admin_q;
 	struct request_queue *connect_q;
-#ifndef __GENKSYMS__
-	struct request_queue *fabrics_q;
-#endif
 	struct device *dev;
 	int instance;
 	int numa_node;
@@ -222,7 +219,6 @@ struct nvme_ctrl {
 	struct work_struct scan_work;
 	struct work_struct async_event_work;
 	struct delayed_work ka_work;
-	struct delayed_work failfast_work;
 	struct nvme_command ka_cmd;
 	struct work_struct fw_act_work;
 	unsigned long events;
@@ -257,9 +253,13 @@ struct nvme_ctrl {
 	u16 icdoff;
 	u16 maxcmd;
 	int nr_reconnects;
+	struct nvmf_ctrl_options *opts;
+#ifndef __GENKSYMS__
+	struct request_queue *fabrics_q;
+	struct delayed_work failfast_work;
 	unsigned long flags;
 #define NVME_CTRL_FAILFAST_EXPIRED	0
-	struct nvmf_ctrl_options *opts;
+#endif
 };
 
 enum nvme_iopolicy {
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -298,8 +298,10 @@ static bool nvme_available_path(struct n
 	struct nvme_ns *ns;
 
 	list_for_each_entry_rcu(ns, &head->list, siblings) {
+#ifndef __GENKSYMS__
 		if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
 			continue;
+#endif
 		switch (ns->ctrl->state) {
 		case NVME_CTRL_LIVE:
 		case NVME_CTRL_RESETTING:
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -125,6 +125,7 @@ static void nvme_queue_scan(struct nvme_
 		queue_work(nvme_wq, &ctrl->scan_work);
 }
 
+#ifndef __GENKSYMS__
 static void nvme_failfast_work(struct work_struct *work)
 {
 	struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
@@ -155,7 +156,7 @@ static inline void nvme_stop_failfast_wo
 	cancel_delayed_work_sync(&ctrl->failfast_work);
 	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
 }
-
+#endif
 
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
 {
@@ -418,13 +419,18 @@ bool nvme_change_ctrl_state(struct nvme_
 		return false;
 
 	if (ctrl->state == NVME_CTRL_LIVE) {
+#ifndef __GENKSYMS__
 		if (old_state == NVME_CTRL_CONNECTING)
 			nvme_stop_failfast_work(ctrl);
+#endif
 		nvme_kick_requeue_lists(ctrl);
-	} else if (ctrl->state == NVME_CTRL_CONNECTING &&
+	}
+#ifndef __GENKSYMS__
+	else if (ctrl->state == NVME_CTRL_CONNECTING &&
 		old_state == NVME_CTRL_RESETTING) {
 		nvme_start_failfast_work(ctrl);
 	}
+#endif
 	return changed;
 }
 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
@@ -3642,7 +3648,9 @@ void nvme_stop_ctrl(struct nvme_ctrl *ct
 {
 	nvme_mpath_stop(ctrl);
 	nvme_stop_keep_alive(ctrl);
+#ifndef __GENKSYMS__
 	nvme_stop_failfast_work(ctrl);
+#endif
 	flush_work(&ctrl->async_event_work);
 	cancel_work_sync(&ctrl->fw_act_work);
 	if (ctrl->ops->stop_ctrl)
@@ -3723,7 +3731,9 @@ int nvme_init_ctrl(struct nvme_ctrl *ctr
 	int ret;
 
 	ctrl->state = NVME_CTRL_NEW;
+#ifndef __GENKSYMS__
 	clear_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags);
+#endif
 	spin_lock_init(&ctrl->lock);
 	mutex_init(&ctrl->scan_lock);
 	INIT_LIST_HEAD(&ctrl->namespaces);
@@ -3737,7 +3747,9 @@ int nvme_init_ctrl(struct nvme_ctrl *ctr
 	INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
 
 	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+#ifndef __GENKSYMS__
 	INIT_DELAYED_WORK(&ctrl->failfast_work, nvme_failfast_work);
+#endif
 	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
 	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
 
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -577,9 +577,11 @@ blk_status_t nvmf_fail_nonready_command(
 {
 	if (ctrl->state != NVME_CTRL_DELETING &&
 	    ctrl->state != NVME_CTRL_DEAD &&
-	    !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) &&
 	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
-		return BLK_STS_RESOURCE;
+#ifndef __GENKSYMS__
+		    if (!test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags))
+#endif
+			return BLK_STS_RESOURCE;
 
 	nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
 	blk_mq_start_request(rq);
@@ -641,7 +643,9 @@ static const match_table_t opt_tokens =
 	{ NVMF_OPT_DISABLE_SQFLOW,	"disable_sqflow"	},
 	{ NVMF_OPT_HDR_DIGEST,		"hdr_digest"		},
 	{ NVMF_OPT_DATA_DIGEST,		"data_digest"		},
+#ifndef __GENKSYMS__
 	{ NVMF_OPT_FAIL_FAST_TMO,	"fast_io_fail_tmo=%d"   },
+#endif
 	{ NVMF_OPT_ERR,			NULL			}
 };
 
@@ -661,7 +665,9 @@ static int nvmf_parse_options(struct nvm
 	opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
 	opts->kato = NVME_DEFAULT_KATO;
 	opts->duplicate_connect = false;
+#ifndef __GENKSYMS__
 	opts->fast_io_fail_tmo = NVMF_DEF_FAIL_FAST_TMO;
+#endif
 	opts->hdr_digest = false;
 	opts->data_digest = false;
 
@@ -788,6 +794,7 @@ static int nvmf_parse_options(struct nvm
 				pr_warn("ctrl_loss_tmo < 0 will reconnect forever\n");
 			ctrl_loss_tmo = token;
 			break;
+#ifndef __GENKSYMS__
 		case NVMF_OPT_FAIL_FAST_TMO:
 			if (match_int(args, &token)) {
 				ret = -EINVAL;
@@ -799,6 +806,7 @@ static int nvmf_parse_options(struct nvm
 					token);
 			opts->fast_io_fail_tmo = token;
 			break;
+#endif
 		case NVMF_OPT_HOSTNQN:
 			if (opts->host) {
 				pr_err("hostnqn already user-assigned: %s\n",
@@ -893,9 +901,11 @@ static int nvmf_parse_options(struct nvm
 	} else {
 		opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
 						opts->reconnect_delay);
+#ifndef __GENKSYMS__
 		if (ctrl_loss_tmo < opts->fast_io_fail_tmo)
 			pr_warn("failfast tmo (%d) larger than controller loss tmo (%d)\n",
 				opts->fast_io_fail_tmo, ctrl_loss_tmo);
+#endif
 	}
 
 	if (!opts->host) {
@@ -993,11 +1003,18 @@ void nvmf_free_options(struct nvmf_ctrl_
 EXPORT_SYMBOL_GPL(nvmf_free_options);
 
 #define NVMF_REQUIRED_OPTS	(NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
+#ifndef __GENKSYMS__
 #define NVMF_ALLOWED_OPTS	(NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
 				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
 				 NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
 				 NVMF_OPT_DISABLE_SQFLOW |\
 				 NVMF_OPT_FAIL_FAST_TMO)
+#else
+#define NVMF_ALLOWED_OPTS	(NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
+				 NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
+				 NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
+				 NVMF_OPT_DISABLE_SQFLOW)
+#endif
 
 static struct nvme_ctrl *
 nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)