From: "Wei Hu(Xavier)" <xavier.huwei@huawei.com>
Date: Wed, 30 Aug 2017 17:23:02 +0800
Subject: RDMA/hns: Modify assignment device variable to support both PCI
 device and platform device
Patch-mainline: v4.15-rc1
Git-commit: 13ca970e3692e498e1544d0c5141f20da9a8e89d
References: bsc#1104427 FATE#326416

To make it easier to support new hardware versions, the hardware-independent
code lives in hns-roce.ko, while the hardware-specific operations live in
hns_roce_hw_v1.ko or hns_roce_hw_v2.ko, depending on the chip series.

The hip08 RoCE engine is a PCI device, while the hip06 RoCE engine is a
platform device. To support both platform devices and PCI devices, replace
&hr_dev->pdev->dev with hr_dev->dev throughout hns-roce.ko, as below:
	Before modification:
		struct device *dev = &hr_dev->pdev->dev;
	After modification:
		struct device *dev = hr_dev->dev;

	The related structure:
	struct hns_roce_dev {
		...
		struct platform_device  *pdev;
		struct pci_dev		*pci_dev;
		struct device		*dev;
		...
	}
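
Each hardware driver fills in hr_dev->dev from its own bus-specific handle
at probe time, so the common code in hns-roce.ko never has to know which
bus the device sits on. A minimal sketch of the two probe paths (the
hip08/PCI side is an assumption for illustration; only the hip06 platform
probe is touched by this patch):

	/* hns_roce_hw_v1.c: hip06 platform probe (changed by this patch) */
	hr_dev->pdev = pdev;		/* struct platform_device *pdev */
	hr_dev->dev = &pdev->dev;	/* generic struct device handle */

	/* hns_roce_hw_v2.c: hypothetical hip08 PCI probe (not in this patch) */
	hr_dev->pci_dev = pci_dev;	/* struct pci_dev *pci_dev */
	hr_dev->dev = &pci_dev->dev;	/* same generic handle */

Common code can then pass hr_dev->dev directly to dev_err(),
dma_alloc_coherent() and friends, regardless of the underlying bus.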

Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Shaobo Xu <xushaobo2@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/infiniband/hw/hns/hns_roce_ah.c    |    2 +-
 drivers/infiniband/hw/hns/hns_roce_alloc.c |    6 +++---
 drivers/infiniband/hw/hns/hns_roce_cmd.c   |    6 +++---
 drivers/infiniband/hw/hns/hns_roce_cq.c    |   12 ++++++------
 drivers/infiniband/hw/hns/hns_roce_hem.c   |   12 ++++++------
 drivers/infiniband/hw/hns/hns_roce_hw_v1.c |    1 +
 drivers/infiniband/hw/hns/hns_roce_main.c  |   12 ++++++------
 drivers/infiniband/hw/hns/hns_roce_mr.c    |    8 ++++----
 drivers/infiniband/hw/hns/hns_roce_pd.c    |    2 +-
 drivers/infiniband/hw/hns/hns_roce_qp.c    |   20 ++++++++++----------
 10 files changed, 41 insertions(+), 40 deletions(-)

--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -44,7 +44,7 @@ struct ib_ah *hns_roce_create_ah(struct
 				 struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct ib_gid_attr gid_attr;
 	struct hns_roce_ah *ah;
 	u16 vlan_tag = 0xffff;
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -161,7 +161,7 @@ void hns_roce_buf_free(struct hns_roce_d
 		       struct hns_roce_buf *buf)
 {
 	int i;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	u32 bits_per_long = BITS_PER_LONG;
 
 	if (buf->nbufs == 1) {
@@ -172,7 +172,7 @@ void hns_roce_buf_free(struct hns_roce_d
 
 		for (i = 0; i < buf->nbufs; ++i)
 			if (buf->page_list[i].buf)
-				dma_free_coherent(&hr_dev->pdev->dev, PAGE_SIZE,
+				dma_free_coherent(dev, PAGE_SIZE,
 						  buf->page_list[i].buf,
 						  buf->page_list[i].map);
 		kfree(buf->page_list);
@@ -186,7 +186,7 @@ int hns_roce_buf_alloc(struct hns_roce_d
 	int i = 0;
 	dma_addr_t t;
 	struct page **pages;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	u32 bits_per_long = BITS_PER_LONG;
 
 	/* SQ/RQ buf lease than one page, SQ + RQ = 8K */
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -125,7 +125,7 @@ static int __hns_roce_cmd_mbox_poll(stru
 				    u8 op_modifier, u16 op,
 				    unsigned long timeout)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	u8 __iomem *hcr = hr_dev->cmd.hcr;
 	unsigned long end = 0;
 	u32 status = 0;
@@ -196,8 +196,8 @@ static int __hns_roce_cmd_mbox_wait(stru
 				    unsigned long timeout)
 {
 	struct hns_roce_cmdq *cmd = &hr_dev->cmd;
-	struct device *dev = &hr_dev->pdev->dev;
 	struct hns_roce_cmd_context *context;
+	struct device *dev = hr_dev->dev;
 	int ret = 0;
 
 	spin_lock(&cmd->context_lock);
@@ -273,7 +273,7 @@ EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox);
 
 int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 
 	mutex_init(&hr_dev->cmd.hcr_mutex);
 	sema_init(&hr_dev->cmd.poll_sem, 1);
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -58,7 +58,7 @@ static void hns_roce_ib_cq_event(struct
 	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
 	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
 	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
-		dev_err(&hr_dev->pdev->dev,
+		dev_err(hr_dev->dev,
 			"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
 			event_type, hr_cq->cqn);
 		return;
@@ -87,7 +87,7 @@ static int hns_roce_cq_alloc(struct hns_
 {
 	struct hns_roce_cmd_mailbox *mailbox = NULL;
 	struct hns_roce_cq_table *cq_table = NULL;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	dma_addr_t dma_handle;
 	u64 *mtts = NULL;
 	int ret = 0;
@@ -182,7 +182,7 @@ static int hns_roce_hw2sw_cq(struct hns_
 void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
 	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	int ret;
 
 	ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
@@ -282,7 +282,7 @@ struct ib_cq *hns_roce_ib_create_cq(stru
 				    struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_ib_create_cq ucmd;
 	struct hns_roce_cq *hr_cq = NULL;
 	struct hns_roce_uar *uar = NULL;
@@ -416,7 +416,7 @@ EXPORT_SYMBOL_GPL(hns_roce_ib_destroy_cq
 
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_cq *cq;
 
 	cq = radix_tree_lookup(&hr_dev->cq_table.tree,
@@ -432,7 +432,7 @@ void hns_roce_cq_completion(struct hns_r
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
 {
 	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_cq *cq;
 
 	cq = radix_tree_lookup(&cq_table->tree,
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -84,7 +84,7 @@ struct hns_roce_hem *hns_roce_alloc_hem(
 		 * memory, directly return fail.
 		 */
 		mem = &chunk->mem[chunk->npages];
-		buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
+		buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
 				&sg_dma_address(mem), gfp_mask);
 		if (!buf)
 			goto fail;
@@ -115,7 +115,7 @@ void hns_roce_free_hem(struct hns_roce_d
 
 	list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
 		for (i = 0; i < chunk->npages; ++i)
-			dma_free_coherent(&hr_dev->pdev->dev,
+			dma_free_coherent(hr_dev->dev,
 				   chunk->mem[i].length,
 				   lowmem_page_address(sg_page(&chunk->mem[i])),
 				   sg_dma_address(&chunk->mem[i]));
@@ -128,8 +128,8 @@ void hns_roce_free_hem(struct hns_roce_d
 static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
 			    struct hns_roce_hem_table *table, unsigned long obj)
 {
-	struct device *dev = &hr_dev->pdev->dev;
 	spinlock_t *lock = &hr_dev->bt_cmd_lock;
+	struct device *dev = hr_dev->dev;
 	unsigned long end = 0;
 	unsigned long flags;
 	struct hns_roce_hem_iter iter;
@@ -212,7 +212,7 @@ static int hns_roce_set_hem(struct hns_r
 int hns_roce_table_get(struct hns_roce_dev *hr_dev,
 		       struct hns_roce_hem_table *table, unsigned long obj)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	int ret = 0;
 	unsigned long i;
 
@@ -251,7 +251,7 @@ out:
 void hns_roce_table_put(struct hns_roce_dev *hr_dev,
 			struct hns_roce_hem_table *table, unsigned long obj)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	unsigned long i;
 
 	i = (obj & (table->num_obj - 1)) /
@@ -380,7 +380,7 @@ int hns_roce_init_hem_table(struct hns_r
 void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
 				struct hns_roce_hem_table *table)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	unsigned long i;
 
 	for (i = 0; i < table->num_hem; ++i)
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -4050,6 +4050,7 @@ static int hns_roce_probe(struct platfor
 	}
 
 	hr_dev->pdev = pdev;
+	hr_dev->dev = dev;
 	platform_set_drvdata(pdev, hr_dev);
 
 	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -117,7 +117,7 @@ static int hns_roce_del_gid(struct ib_de
 static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 			   unsigned long event)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct net_device *netdev;
 
 	netdev = hr_dev->iboe.netdevs[port];
@@ -240,7 +240,7 @@ static int hns_roce_query_port(struct ib
 			       struct ib_port_attr *props)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct net_device *net_dev;
 	unsigned long flags;
 	enum ib_mtu mtu;
@@ -428,7 +428,7 @@ static int hns_roce_register_device(stru
 	int ret;
 	struct hns_roce_ib_iboe *iboe = NULL;
 	struct ib_device *ib_dev = NULL;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 
 	iboe = &hr_dev->iboe;
 	spin_lock_init(&iboe->lock);
@@ -536,7 +536,7 @@ error_failed_setup_mtu_mac:
 static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
 {
 	int ret;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 
 	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
 				      HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
@@ -605,7 +605,7 @@ err_unmap_mtt:
 static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 {
 	int ret;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 
 	spin_lock_init(&hr_dev->sm_lock);
 	spin_lock_init(&hr_dev->bt_cmd_lock);
@@ -668,7 +668,7 @@ err_uar_table_free:
 int hns_roce_init(struct hns_roce_dev *hr_dev)
 {
 	int ret;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 
 	if (hr_dev->hw->reset) {
 		ret = hr_dev->hw->reset(hr_dev, true);
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -241,9 +241,9 @@ static int hns_roce_mr_alloc(struct hns_
 			     u64 size, u32 access, int npages,
 			     struct hns_roce_mr *mr)
 {
+	struct device *dev = hr_dev->dev;
 	unsigned long index = 0;
 	int ret = 0;
-	struct device *dev = &hr_dev->pdev->dev;
 
 	/* Allocate a key for mr from mr_table */
 	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
@@ -276,7 +276,7 @@ static int hns_roce_mr_alloc(struct hns_
 static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
 			     struct hns_roce_mr *mr)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	int npages = 0;
 	int ret;
 
@@ -302,7 +302,7 @@ static int hns_roce_mr_enable(struct hns
 {
 	int ret;
 	unsigned long mtpt_idx = key_to_hw_index(mr->key);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_cmd_mailbox *mailbox;
 	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
 
@@ -552,7 +552,7 @@ struct ib_mr *hns_roce_reg_user_mr(struc
 				   struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_mr *mr = NULL;
 	int ret = 0;
 	int n = 0;
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -60,7 +60,7 @@ struct ib_pd *hns_roce_alloc_pd(struct i
 				struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_pd *pd;
 	int ret;
 
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -44,7 +44,7 @@
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 {
 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_qp *qp;
 
 	spin_lock(&qp_table->lock);
@@ -154,7 +154,7 @@ static int hns_roce_gsi_qp_alloc(struct
 				hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
 	spin_unlock_irq(&qp_table->lock);
 	if (ret) {
-		dev_err(&hr_dev->pdev->dev, "QPC radix_tree_insert failed\n");
+		dev_err(hr_dev->dev, "QPC radix_tree_insert failed\n");
 		goto err_put_irrl;
 	}
 
@@ -172,7 +172,7 @@ static int hns_roce_qp_alloc(struct hns_
 			     struct hns_roce_qp *hr_qp)
 {
 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	int ret;
 
 	if (!qpn)
@@ -261,8 +261,8 @@ static int hns_roce_set_rq_size(struct h
 				struct ib_qp_cap *cap, int is_user, int has_srq,
 				struct hns_roce_qp *hr_qp)
 {
+	struct device *dev = hr_dev->dev;
 	u32 max_cnt;
-	struct device *dev = &hr_dev->pdev->dev;
 
 	/* Check the validity of QP support capacity */
 	if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
@@ -319,7 +319,7 @@ static int hns_roce_set_user_sq_size(str
 	if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
 	     ucmd->log_sq_stride > max_sq_stride ||
 	     ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
-		dev_err(&hr_dev->pdev->dev, "check SQ size error!\n");
+		dev_err(hr_dev->dev, "check SQ size error!\n");
 		return -EINVAL;
 	}
 
@@ -343,7 +343,7 @@ static int hns_roce_set_kernel_sq_size(s
 				       struct ib_qp_cap *cap,
 				       struct hns_roce_qp *hr_qp)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	u32 max_cnt;
 
 	if (cap->max_send_wr  > hr_dev->caps.max_wqes  ||
@@ -395,7 +395,7 @@ static int hns_roce_create_qp_common(str
 				     struct ib_udata *udata, unsigned long sqpn,
 				     struct hns_roce_qp *hr_qp)
 {
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_ib_create_qp ucmd;
 	unsigned long qpn = 0;
 	int ret = 0;
@@ -575,7 +575,7 @@ struct ib_qp *hns_roce_create_qp(struct
 				 struct ib_udata *udata)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	struct hns_roce_sqp *hr_sqp;
 	struct hns_roce_qp *hr_qp;
 	int ret;
@@ -660,7 +660,7 @@ int hns_roce_modify_qp(struct ib_qp *ibq
 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
 	enum ib_qp_state cur_state, new_state;
-	struct device *dev = &hr_dev->pdev->dev;
+	struct device *dev = hr_dev->dev;
 	int ret = -EINVAL;
 	int p;
 	enum ib_mtu active_mtu;
@@ -835,7 +835,7 @@ int hns_roce_init_qp_table(struct hns_ro
 				   hr_dev->caps.num_qps - 1, SQP_NUM,
 				   reserved_from_top);
 	if (ret) {
-		dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
+		dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
 			ret);
 		return ret;
 	}