author:     Xi Wang <wangxi11@huawei.com>           2020-04-28 19:03:43 +0800
committer:  Jason Gunthorpe <jgg@mellanox.com>      2020-05-06 17:26:44 -0300
commit:     67954a6e379b4678536ef14a1d49ea78fcdc4a1f
tree:       7ecb2f837f8ed09864dcbc939a06359e2c63e506 /drivers/infiniband/hw/hns/hns_roce_hw_v2.c
parent:     ffb1308b88b6023205249f11332ebff83610899a
RDMA/hns: Optimize SRQ buffer size calculating process
Optimize the SRQ's WQE buffer parameter calculation to make the code more
readable by using the new multi-hop addressing functions to calculate the
SRQ's capabilities.

Link: https://lore.kernel.org/r/1588071823-40200-6-git-send-email-liweihang@huawei.com
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
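The substance of the change in hns_roce_v2_write_srqc() is that the open-coded hop number checks are replaced by to_hr_hem_hopnum(), which also takes the entry count of the region being addressed. Judging only from the expressions it replaces in this patch, the helper's semantics are presumably along the lines of the sketch below; this is an illustration under that assumption, not the driver's actual definition:

/* Presumed behaviour of the multi-hop addressing helper: a region that
 * needs no extra hop (HNS_ROCE_HOP_NUM_0) or that contains no entries
 * is programmed with hop number 0 in the hardware context.
 */
static inline u32 to_hr_hem_hopnum(u32 hopnum, u32 count)
{
	if (!count)
		return 0;

	return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;
}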
Diffstat (limited to 'drivers/infiniband/hw/hns/hns_roce_hw_v2.c')
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 29
1 file changed, 14 insertions, 15 deletions
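On the post-receive path, the removed fill_idx_queue() wrote the WQE index through a plain unsigned int pointer; the patch instead looks up the slot with the new get_idx_buf() and stores an explicit little-endian value, since the index queue is consumed by hardware. A minimal sketch of that pattern, using a hypothetical wrapper name and the structures shown in the diff below:

/* Hypothetical wrapper around the two lines added to
 * hns_roce_v2_post_srq_recv(): the index queue entry for slot 'ind'
 * receives the selected WQE index as a little-endian 32-bit value.
 */
static void write_srq_idx_entry(struct hns_roce_srq *srq, int ind, int wqe_idx)
{
	__le32 *srq_idx = get_idx_buf(&srq->idx_que, ind);

	*srq_idx = cpu_to_le32(wqe_idx);
}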
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 0b79dafe919e..f70370d24512 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -699,6 +699,12 @@ static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
 	return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
 }
 
+static void *get_idx_buf(struct hns_roce_idx_que *idx_que, int n)
+{
+	return hns_roce_buf_offset(idx_que->mtr.kmem,
+				   n << idx_que->entry_shift);
+}
+
 static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
 {
 	/* always called with interrupts disabled. */
@@ -725,16 +731,6 @@ static int find_empty_entry(struct hns_roce_idx_que *idx_que,
 	return wqe_idx;
 }
 
-static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
-			   int cur_idx, int wqe_idx)
-{
-	unsigned int *addr;
-
-	addr = (unsigned int *)hns_roce_buf_offset(idx_que->mtr.kmem,
-						   cur_idx * idx_que->entry_sz);
-	*addr = wqe_idx;
-}
-
 static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 				     const struct ib_recv_wr *wr,
 				     const struct ib_recv_wr **bad_wr)
@@ -744,6 +740,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 	struct hns_roce_v2_wqe_data_seg *dseg;
 	struct hns_roce_v2_db srq_db;
 	unsigned long flags;
+	__le32 *srq_idx;
 	int ret = 0;
 	int wqe_idx;
 	void *wqe;
@@ -775,7 +772,6 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 			break;
 		}
 
-		fill_idx_queue(&srq->idx_que, ind, wqe_idx);
 		wqe = get_srq_wqe(srq, wqe_idx);
 		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
 
@@ -791,6 +787,9 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 			dseg[i].addr = 0;
 		}
 
+		srq_idx = get_idx_buf(&srq->idx_que, ind);
+		*srq_idx = cpu_to_le32(wqe_idx);
+
 		srq->wrid[wqe_idx] = wr->wr_id;
 		ind = (ind + 1) & (srq->wqe_cnt - 1);
 	}
@@ -4901,8 +4900,8 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
 	roce_set_field(srq_context->byte_4_srqn_srqst,
 		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
 		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
-		       (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
-			hr_dev->caps.srqwqe_hop_num));
+		       to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
+					srq->wqe_cnt));
 	roce_set_field(srq_context->byte_4_srqn_srqst,
 		       SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
 		       ilog2(srq->wqe_cnt));
@@ -4944,8 +4943,8 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
 	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
 		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
 		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
-		       hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
-		       hr_dev->caps.idx_hop_num);
+		       to_hr_hem_hopnum(hr_dev->caps.idx_hop_num,
+					srq->wqe_cnt));
 	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
 		       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,