From: Hannes Reinecke <hare@suse.de>
Date: Mon, 30 Jul 2018 13:38:00 +0200
Subject: [PATCH] kABI fixes for nvme-if_ready-checks-fail-io-to-deleting-controll.patch
References: bsc#1077989
Patch-Mainline: never, kABI fix for SLE15
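
The mainline change carried in nvme-if_ready-checks-fail-io-to-deleting-controll.patch
alters the signature of the exported function nvmf_fail_nonready_command(), which would
break kABI. Move the new two-argument implementation to __nvmf_fail_nonready_command(),
switch the in-tree callers (nvme-fc, nvme-rdma and the nvmet loop driver) over to it,
and keep the original single-argument nvmf_fail_nonready_command() with its previous
behaviour so the exported symbol stays unchanged.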

Signed-off-by: Hannes Reinecke <hare@suse.com>
---
 drivers/nvme/host/fabrics.c | 11 ++++++++++-
 drivers/nvme/host/fabrics.h |  3 ++-
 drivers/nvme/host/fc.c      |  2 +-
 drivers/nvme/host/rdma.c    |  2 +-
 drivers/nvme/target/loop.c  |  2 +-
 5 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 4d24137db42f..3dc0980d7435 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -545,7 +545,7 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
  * Note: commands used to initialize the controller will be marked for failfast.
  * Note: nvme cli/ioctl commands are marked for failfast.
  */
-blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+blk_status_t __nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
 		struct request *rq)
 {
 	if (ctrl->state != NVME_CTRL_DELETING &&
@@ -555,6 +555,15 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
 	nvme_complete_rq(rq);
 	return BLK_STS_OK;
 }
+EXPORT_SYMBOL_GPL(__nvmf_fail_nonready_command);
+
+blk_status_t nvmf_fail_nonready_command(struct request *rq)
+{
+	if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+		return BLK_STS_RESOURCE;
+	nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+	return BLK_STS_IOERR;
+}
 EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
 
 bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index aa2fdb2a2e8f..3a576f939e85 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -162,7 +162,8 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
-blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+blk_status_t nvmf_fail_nonready_command(struct request *rq);
+blk_status_t __nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
 		struct request *rq);
 bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 		bool queue_live);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index ac5d2698f792..f9bc1628c2ab 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2280,7 +2280,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
 	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
+		return __nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
 	ret = nvme_setup_cmd(ns, rq, sqe);
 	if (ret)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 280b6349f380..d42bada25b54 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1628,7 +1628,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	WARN_ON_ONCE(rq->tag < 0);
 
 	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
+		return __nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
 	dev = queue->device->dev;
 	ib_dma_sync_single_for_cpu(dev, sqe->dma,
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 88dfc7f295d0..1357e6333c0c 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_status_t ret;
 
 	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
-		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
+		return __nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
 
 	ret = nvme_setup_cmd(ns, req, &iod->cmd);
 	if (ret)
-- 
2.12.3