RDMA/hns: Support multi hop addressing for PBL in hip08
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <rdma/ib_umem.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
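/*
 * Number of free descriptor slots in a command queue ring.  next_to_use
 * and next_to_clean are indexes into a ring of desc_num entries; one
 * slot is deliberately kept unused so that a full ring can be told
 * apart from an empty one (hence the final "- 1").
 */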
static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
{
        int ntu = ring->next_to_use;
        int ntc = ring->next_to_clean;
        int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

        return ring->desc_num - used - 1;
}

static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_v2_cmq_ring *ring)
{
        int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

        ring->desc = kzalloc(size, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
                                             DMA_BIDIRECTIONAL);
        if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
                ring->desc_dma_addr = 0;
                kfree(ring->desc);
                ring->desc = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
                                   struct hns_roce_v2_cmq_ring *ring)
{
        dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
                         ring->desc_num * sizeof(struct hns_roce_cmq_desc),
                         DMA_BIDIRECTIONAL);
        kfree(ring->desc);
}

static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
                                            &priv->cmq.csq : &priv->cmq.crq;

        ring->flag = ring_type;
        ring->next_to_clean = 0;
        ring->next_to_use = 0;

        return hns_roce_alloc_cmq_desc(hr_dev, ring);
}

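/*
 * Program the base address, depth and head/tail pointers of the CSQ
 * (command send queue) or CRQ (command receive queue) into the RoCEE
 * registers.  The depth register carries desc_num shifted right by
 * HNS_ROCE_CMQ_DESC_NUM_S, OR-ed with the queue enable bit.
 */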
static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
                                            &priv->cmq.csq : &priv->cmq.crq;
        dma_addr_t dma = ring->desc_dma_addr;

        if (ring_type == TYPE_CSQ) {
                roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
                roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
                           (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
                           HNS_ROCE_CMQ_ENABLE);
                roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
                roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
        } else {
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
                           (ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S) |
                           HNS_ROCE_CMQ_ENABLE);
                roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
                roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
        }
}

static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        int ret;

        /* Set up the queue entries for the command queues */
        priv->cmq.csq.desc_num = 1024;
        priv->cmq.crq.desc_num = 1024;

        /* Set up the locks for the command queues */
        spin_lock_init(&priv->cmq.csq.lock);
        spin_lock_init(&priv->cmq.crq.lock);

        /* Set up the TX write-back timeout */
        priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

        /* Init CSQ */
        ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
        if (ret) {
                dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
                return ret;
        }

        /* Init CRQ */
        ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
        if (ret) {
                dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
                goto err_crq;
        }

        /* Init CSQ REG */
        hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);

        /* Init CRQ REG */
        hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);

        return 0;

err_crq:
        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);

        return ret;
}

static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;

        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
        hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
}

void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
                                   enum hns_roce_opcode_type opcode,
                                   bool is_read)
{
        memset(desc, 0, sizeof(struct hns_roce_cmq_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flag =
                cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
        if (is_read)
                desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
        else
                desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}

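/*
 * The CSQ has completed when the head pointer written back by the
 * hardware has caught up with the driver's next_to_use index.
 * hns_roce_cmq_csq_clean() then zeroes the consumed descriptors and
 * advances next_to_clean up to that head pointer.
 */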
static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);

        return head == priv->cmq.csq.next_to_use;
}

static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
        struct hns_roce_cmq_desc *desc;
        u16 ntc = csq->next_to_clean;
        u32 head;
        int clean = 0;

        desc = &csq->desc[ntc];
        head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
        while (head != ntc) {
                memset(desc, 0, sizeof(*desc));
                ntc++;
                if (ntc == csq->desc_num)
                        ntc = 0;
                desc = &csq->desc[ntc];
                clean++;
        }
        csq->next_to_clean = ntc;

        return clean;
}

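/*
 * Post @num descriptors to the CSQ and poll for completion.  The flow
 * is: reserve ring space under the CSQ lock, copy the descriptors into
 * the ring, ring the doorbell by writing the new tail to
 * ROCEE_TX_CMQ_TAIL_REG, poll until the hardware head catches up, then
 * copy the written-back descriptors (and their return codes) back to
 * the caller.  Callers normally build the descriptors with
 * hns_roce_cmq_setup_basic_desc(), as hns_roce_cmq_query_hw_info()
 * does below.
 */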
int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
                      struct hns_roce_cmq_desc *desc, int num)
{
        struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
        struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
        struct hns_roce_cmq_desc *desc_to_use;
        bool complete = false;
        u32 timeout = 0;
        int handle = 0;
        u16 desc_ret;
        int ret = 0;
        int ntc;

        spin_lock_bh(&csq->lock);

        if (num > hns_roce_cmq_space(csq)) {
                spin_unlock_bh(&csq->lock);
                return -EBUSY;
        }

        /*
         * Record where these descriptors start in the CSQ; it is used
         * below to pick up what the hardware has written back.
         */
        ntc = csq->next_to_use;

        while (handle < num) {
                desc_to_use = &csq->desc[csq->next_to_use];
                *desc_to_use = desc[handle];
                dev_dbg(hr_dev->dev, "set cmq desc:\n");
                csq->next_to_use++;
                if (csq->next_to_use == csq->desc_num)
                        csq->next_to_use = 0;
                handle++;
        }

        /* Write to hardware */
        roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);

        /*
         * If the command is synchronous, wait for the hardware to write
         * back; when multiple descriptors are sent, only the first one's
         * flags are checked.
         */
        if (desc->flag & HNS_ROCE_CMD_FLAG_NO_INTR) {
                do {
                        if (hns_roce_cmq_csq_done(hr_dev))
                                break;
                        usleep_range(1000, 2000);
                        timeout++;
                } while (timeout < priv->cmq.tx_timeout);
        }

        if (hns_roce_cmq_csq_done(hr_dev)) {
                complete = true;
                handle = 0;
                while (handle < num) {
                        /* get the result of the hardware write-back */
                        desc_to_use = &csq->desc[ntc];
                        desc[handle] = *desc_to_use;
                        dev_dbg(hr_dev->dev, "Get cmq desc:\n");
                        desc_ret = desc[handle].retval;
                        if (desc_ret == CMD_EXEC_SUCCESS)
                                ret = 0;
                        else
                                ret = -EIO;
                        priv->cmq.last_status = desc_ret;
                        ntc++;
                        handle++;
                        if (ntc == csq->desc_num)
                                ntc = 0;
                }
        }

        if (!complete)
                ret = -EAGAIN;

        /* clean the command send queue */
        handle = hns_roce_cmq_csq_clean(hr_dev);
        if (handle != num)
                dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
                         handle, num);

        spin_unlock_bh(&csq->lock);

        return ret;
}

int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_query_version *resp;
        struct hns_roce_cmq_desc desc;
        int ret;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
        ret = hns_roce_cmq_send(hr_dev, &desc, 1);
        if (ret)
                return ret;

        resp = (struct hns_roce_query_version *)desc.data;
        hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
        hr_dev->vendor_id = le32_to_cpu(resp->rocee_vendor_id);

        return 0;
}

static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cfg_global_param *req;
        struct hns_roce_cmq_desc desc;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
                                      false);

        req = (struct hns_roce_cfg_global_param *)desc.data;
        memset(req, 0, sizeof(*req));
        /* 0x3e8 = 1000: the 1-us timer configuration */
        roce_set_field(req->time_cfg_udp_port,
                       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
                       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
        /* 0x12b7 = 4791, the IANA-assigned UDP port for RoCEv2 */
        roce_set_field(req->time_cfg_udp_port,
                       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
                       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);

        return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc[2];
        struct hns_roce_pf_res *res;
        int ret;
        int i;

        for (i = 0; i < 2; i++) {
                hns_roce_cmq_setup_basic_desc(&desc[i],
                                              HNS_ROCE_OPC_QUERY_PF_RES, true);

                if (i == 0)
                        desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
                else
                        desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
        }

        ret = hns_roce_cmq_send(hr_dev, desc, 2);
        if (ret)
                return ret;

        res = (struct hns_roce_pf_res *)desc[0].data;

        hr_dev->caps.qpc_bt_num = roce_get_field(res->qpc_bt_idx_num,
                                        PF_RES_DATA_1_PF_QPC_BT_NUM_M,
                                        PF_RES_DATA_1_PF_QPC_BT_NUM_S);
        hr_dev->caps.srqc_bt_num = roce_get_field(res->srqc_bt_idx_num,
                                        PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
                                        PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
        hr_dev->caps.cqc_bt_num = roce_get_field(res->cqc_bt_idx_num,
                                        PF_RES_DATA_3_PF_CQC_BT_NUM_M,
                                        PF_RES_DATA_3_PF_CQC_BT_NUM_S);
        hr_dev->caps.mpt_bt_num = roce_get_field(res->mpt_bt_idx_num,
                                        PF_RES_DATA_4_PF_MPT_BT_NUM_M,
                                        PF_RES_DATA_4_PF_MPT_BT_NUM_S);

        return 0;
}

static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_cmq_desc desc[2];
        struct hns_roce_vf_res_a *req_a;
        struct hns_roce_vf_res_b *req_b;
        int i;

        req_a = (struct hns_roce_vf_res_a *)desc[0].data;
        req_b = (struct hns_roce_vf_res_b *)desc[1].data;
        memset(req_a, 0, sizeof(*req_a));
        memset(req_b, 0, sizeof(*req_b));
        for (i = 0; i < 2; i++) {
                hns_roce_cmq_setup_basic_desc(&desc[i],
                                              HNS_ROCE_OPC_ALLOC_VF_RES, false);

                if (i == 0)
                        desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
                else
                        desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

                if (i == 0) {
                        roce_set_field(req_a->vf_qpc_bt_idx_num,
                                       VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
                                       VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
                        roce_set_field(req_a->vf_qpc_bt_idx_num,
                                       VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
                                       VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
                                       HNS_ROCE_VF_QPC_BT_NUM);

                        roce_set_field(req_a->vf_srqc_bt_idx_num,
                                       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
                                       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
                        roce_set_field(req_a->vf_srqc_bt_idx_num,
                                       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
                                       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
                                       HNS_ROCE_VF_SRQC_BT_NUM);

                        roce_set_field(req_a->vf_cqc_bt_idx_num,
                                       VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
                                       VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
                        roce_set_field(req_a->vf_cqc_bt_idx_num,
                                       VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
                                       VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
                                       HNS_ROCE_VF_CQC_BT_NUM);

                        roce_set_field(req_a->vf_mpt_bt_idx_num,
                                       VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
                                       VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
                        roce_set_field(req_a->vf_mpt_bt_idx_num,
                                       VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
                                       VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
                                       HNS_ROCE_VF_MPT_BT_NUM);

                        roce_set_field(req_a->vf_eqc_bt_idx_num,
                                       VF_RES_A_DATA_5_VF_EQC_IDX_M,
                                       VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
                        roce_set_field(req_a->vf_eqc_bt_idx_num,
                                       VF_RES_A_DATA_5_VF_EQC_NUM_M,
                                       VF_RES_A_DATA_5_VF_EQC_NUM_S,
                                       HNS_ROCE_VF_EQC_NUM);
                } else {
                        roce_set_field(req_b->vf_smac_idx_num,
                                       VF_RES_B_DATA_1_VF_SMAC_IDX_M,
                                       VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
                        roce_set_field(req_b->vf_smac_idx_num,
                                       VF_RES_B_DATA_1_VF_SMAC_NUM_M,
                                       VF_RES_B_DATA_1_VF_SMAC_NUM_S,
                                       HNS_ROCE_VF_SMAC_NUM);

                        roce_set_field(req_b->vf_sgid_idx_num,
                                       VF_RES_B_DATA_2_VF_SGID_IDX_M,
                                       VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
                        roce_set_field(req_b->vf_sgid_idx_num,
                                       VF_RES_B_DATA_2_VF_SGID_NUM_M,
                                       VF_RES_B_DATA_2_VF_SGID_NUM_S,
                                       HNS_ROCE_VF_SGID_NUM);

                        roce_set_field(req_b->vf_qid_idx_sl_num,
                                       VF_RES_B_DATA_3_VF_QID_IDX_M,
                                       VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
                        roce_set_field(req_b->vf_qid_idx_sl_num,
                                       VF_RES_B_DATA_3_VF_SL_NUM_M,
                                       VF_RES_B_DATA_3_VF_SL_NUM_S,
                                       HNS_ROCE_VF_SL_NUM);
                }
        }

        return hns_roce_cmq_send(hr_dev, desc, 2);
}

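/*
 * Configure the base address table (BT) attributes: for each context
 * table (QPC, SRQC, CQC, MPT) the hardware is told the BA page size,
 * the buffer page size, and the number of hops used for multi-hop
 * addressing.  HNS_ROCE_HOP_NUM_0 is a driver-internal encoding for
 * "0 hops" and is translated to the 0 that the hardware expects.
 */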
static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
        u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
        u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
        u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
        u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
        struct hns_roce_cfg_bt_attr *req;
        struct hns_roce_cmq_desc desc;

        hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
        req = (struct hns_roce_cfg_bt_attr *)desc.data;
        memset(req, 0, sizeof(*req));

        roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
                       CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
                       hr_dev->caps.qpc_ba_pg_sz);
        roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
                       CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
                       hr_dev->caps.qpc_buf_pg_sz);
        roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
                       CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
                       qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);

        roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
                       CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
                       hr_dev->caps.srqc_ba_pg_sz);
        roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
                       CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
                       hr_dev->caps.srqc_buf_pg_sz);
        roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
                       CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
                       srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);

        roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
                       CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
                       hr_dev->caps.cqc_ba_pg_sz);
        roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
                       CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
                       hr_dev->caps.cqc_buf_pg_sz);
        roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
                       CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
                       cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);

        roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
                       CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
                       hr_dev->caps.mpt_ba_pg_sz);
        roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
                       CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
                       hr_dev->caps.mpt_buf_pg_sz);
        roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
                       CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
                       mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);

        return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_caps *caps = &hr_dev->caps;
        int ret;

        ret = hns_roce_cmq_query_hw_info(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "Query hardware version failed, ret = %d.\n",
                        ret);
                return ret;
        }

        ret = hns_roce_config_global_param(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "Configure global param failed, ret = %d.\n",
                        ret);
                return ret;
        }

        /* Get the pf resource owned by every pf */
        ret = hns_roce_query_pf_resource(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "Query pf resource failed, ret = %d.\n",
                        ret);
                return ret;
        }

        ret = hns_roce_alloc_vf_resource(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "Allocate vf resource failed, ret = %d.\n",
                        ret);
                return ret;
        }

        hr_dev->vendor_part_id = 0;
        hr_dev->sys_image_guid = 0;

        caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
        caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
        caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
        caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
        caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
        caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
        caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
        caps->num_uars = HNS_ROCE_V2_UAR_NUM;
        caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
        caps->num_aeq_vectors = 1;
        caps->num_comp_vectors = 63;
        caps->num_other_vectors = 0;
        caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
        caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
        caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
        caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
        caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
        caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
        caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
        caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
        caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
        caps->qpc_entry_sz = HNS_ROCE_V2_QPC_ENTRY_SZ;
        caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
        caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
        caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
        caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
        caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE;
        caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
        caps->reserved_lkey = 0;
        caps->reserved_pds = 0;
        caps->reserved_mrws = 1;
        caps->reserved_uars = 0;
        caps->reserved_cqs = 0;

        caps->qpc_ba_pg_sz = 0;
        caps->qpc_buf_pg_sz = 0;
        caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
        caps->srqc_ba_pg_sz = 0;
        caps->srqc_buf_pg_sz = 0;
        caps->srqc_hop_num = HNS_ROCE_HOP_NUM_0;
        caps->cqc_ba_pg_sz = 0;
        caps->cqc_buf_pg_sz = 0;
        caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
        caps->mpt_ba_pg_sz = 0;
        caps->mpt_buf_pg_sz = 0;
        caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
        caps->pbl_ba_pg_sz = 0;
        caps->pbl_buf_pg_sz = 0;
        caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
        caps->mtt_ba_pg_sz = 0;
        caps->mtt_buf_pg_sz = 0;
        caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
        caps->cqe_ba_pg_sz = 0;
        caps->cqe_buf_pg_sz = 0;
        caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;

        caps->pkey_table_len[0] = 1;
        caps->gid_table_len[0] = 2;
        caps->local_ca_ack_delay = 0;
        caps->max_mtu = IB_MTU_4096;

        ret = hns_roce_v2_set_bt(hr_dev);
        if (ret)
                dev_err(hr_dev->dev, "Configure bt attribute failed, ret = %d.\n",
                        ret);

        return ret;
}

static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
{
        u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);

        return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
}

static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
{
        u32 status = readl(hr_dev->reg_base + ROCEE_VF_MB_STATUS_REG);

        return status & HNS_ROCE_HW_MB_STATUS_MASK;
}

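/*
 * Post a mailbox command: wait for the hardware "go" bit to clear,
 * write the input/output parameters, then write the tag/command and
 * event/token words.  The wmb() orders the parameter writes before the
 * final doorbell words that trigger execution.
 */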
static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
                                 u64 out_param, u32 in_modifier, u8 op_modifier,
                                 u16 op, u16 token, int event)
{
        struct device *dev = hr_dev->dev;
        u32 *hcr = (u32 *)(hr_dev->reg_base + ROCEE_VF_MB_CFG0_REG);
        unsigned long end;
        u32 val0 = 0;
        u32 val1 = 0;

        end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
        while (hns_roce_v2_cmd_pending(hr_dev)) {
                if (time_after(jiffies, end)) {
                        dev_dbg(dev, "jiffies=%lu end=%lu\n", jiffies, end);
                        return -EAGAIN;
                }
                cond_resched();
        }

        roce_set_field(val0, HNS_ROCE_VF_MB4_TAG_MASK,
                       HNS_ROCE_VF_MB4_TAG_SHIFT, in_modifier);
        roce_set_field(val0, HNS_ROCE_VF_MB4_CMD_MASK,
                       HNS_ROCE_VF_MB4_CMD_SHIFT, op);
        roce_set_field(val1, HNS_ROCE_VF_MB5_EVENT_MASK,
                       HNS_ROCE_VF_MB5_EVENT_SHIFT, event);
        roce_set_field(val1, HNS_ROCE_VF_MB5_TOKEN_MASK,
                       HNS_ROCE_VF_MB5_TOKEN_SHIFT, token);

        __raw_writeq(cpu_to_le64(in_param), hcr + 0);
        __raw_writeq(cpu_to_le64(out_param), hcr + 2);

        /* Order the parameter writes before the doorbell words below */
        wmb();

        __raw_writel(cpu_to_le32(val0), hcr + 4);
        __raw_writel(cpu_to_le32(val1), hcr + 5);

        mmiowb();

        return 0;
}

static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
                                unsigned long timeout)
{
        struct device *dev = hr_dev->dev;
        unsigned long end = 0;
        u32 status;

        end = msecs_to_jiffies(timeout) + jiffies;
        while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
                cond_resched();

        if (hns_roce_v2_cmd_pending(hr_dev)) {
                dev_err(dev, "[cmd_poll] hw run cmd timed out!\n");
                return -ETIMEDOUT;
        }

        status = hns_roce_v2_cmd_complete(hr_dev);
        if (status != 0x1) {
                dev_err(dev, "mailbox status 0x%x!\n", status);
                return -EBUSY;
        }

        return 0;
}

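/*
 * Write one step of a HEM (hardware entry memory) table to the device
 * through a mailbox command.  For multi-hop tables, @step_idx selects
 * which level of base address to program: 0 writes an L0 BA, 1 an L1
 * BA, and the final step writes the actual HEM chunk addresses.  The
 * indexes from hns_roce_calc_hem_mhop() flatten the (L0, L1, L2)
 * position into a single table->hem[] slot.
 */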
static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
                               struct hns_roce_hem_table *table, int obj,
                               int step_idx)
{
        struct device *dev = hr_dev->dev;
        struct hns_roce_cmd_mailbox *mailbox;
        struct hns_roce_hem_iter iter;
        struct hns_roce_hem_mhop mhop;
        struct hns_roce_hem *hem;
        unsigned long mhop_obj = obj;
        int i, j, k;
        int ret = 0;
        u64 hem_idx = 0;
        u64 l1_idx = 0;
        u64 bt_ba = 0;
        u32 chunk_ba_num;
        u32 hop_num;
        u16 op = 0xff;

        if (!hns_roce_check_whether_mhop(hr_dev, table->type))
                return 0;

        hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
        i = mhop.l0_idx;
        j = mhop.l1_idx;
        k = mhop.l2_idx;
        hop_num = mhop.hop_num;
        /* each base address entry in a chunk occupies 8 bytes */
        chunk_ba_num = mhop.bt_chunk_size / 8;

        if (hop_num == 2) {
                hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
                          k;
                l1_idx = i * chunk_ba_num + j;
        } else if (hop_num == 1) {
                hem_idx = i * chunk_ba_num + j;
        } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
                hem_idx = i;
        }

        switch (table->type) {
        case HEM_TYPE_QPC:
                op = HNS_ROCE_CMD_WRITE_QPC_BT0;
                break;
        case HEM_TYPE_MTPT:
                op = HNS_ROCE_CMD_WRITE_MPT_BT0;
                break;
        case HEM_TYPE_CQC:
                op = HNS_ROCE_CMD_WRITE_CQC_BT0;
                break;
        case HEM_TYPE_SRQC:
                op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
                break;
        default:
                dev_warn(dev, "Table %d not to be written by mailbox!\n",
                         table->type);
                return 0;
        }
        op += step_idx;

        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        if (check_whether_last_step(hop_num, step_idx)) {
                hem = table->hem[hem_idx];
                for (hns_roce_hem_first(hem, &iter);
                     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
                        bt_ba = hns_roce_hem_addr(&iter);

                        /* configure the ba, tag, and op */
                        ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
                                                obj, 0, op,
                                                HNS_ROCE_CMD_TIMEOUT_MSECS);
                }
        } else {
                if (step_idx == 0)
                        bt_ba = table->bt_l0_dma_addr[i];
                else if (step_idx == 1 && hop_num == 2)
                        bt_ba = table->bt_l1_dma_addr[l1_idx];

                /* configure the ba, tag, and op */
                ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
                                        0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
        }

        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
        return ret;
}

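/*
 * Counterpart of hns_roce_v2_set_hem(): issue the matching DESTROY_*
 * mailbox command for one step of a multi-hop table.
 */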
static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
                                 struct hns_roce_hem_table *table, int obj,
                                 int step_idx)
{
        struct device *dev = hr_dev->dev;
        struct hns_roce_cmd_mailbox *mailbox;
        int ret = 0;
        u16 op = 0xff;

        if (!hns_roce_check_whether_mhop(hr_dev, table->type))
                return 0;

        switch (table->type) {
        case HEM_TYPE_QPC:
                op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
                break;
        case HEM_TYPE_MTPT:
                op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
                break;
        case HEM_TYPE_CQC:
                op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
                break;
        case HEM_TYPE_SRQC:
                op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
                break;
        default:
                dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
                         table->type);
                return 0;
        }
        op += step_idx;

        mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        /* configure the tag and op */
        ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
                                HNS_ROCE_CMD_TIMEOUT_MSECS);

        hns_roce_free_cmd_mailbox(hr_dev, mailbox);
        return ret;
}

static const struct hns_roce_hw hns_roce_hw_v2 = {
        .cmq_init = hns_roce_v2_cmq_init,
        .cmq_exit = hns_roce_v2_cmq_exit,
        .hw_profile = hns_roce_v2_profile,
        .post_mbox = hns_roce_v2_post_mbox,
        .chk_mbox = hns_roce_v2_chk_mbox,
        .set_hem = hns_roce_v2_set_hem,
        .clear_hem = hns_roce_v2_clear_hem,
};

static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
        {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
        /* required last entry */
        {0, }
};

static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
                                  struct hnae3_handle *handle)
{
        const struct pci_device_id *id;

        id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
        if (!id) {
                dev_err(hr_dev->dev, "device is not compatible!\n");
                return -ENXIO;
        }

        hr_dev->hw = &hns_roce_hw_v2;

        /* Get info from NIC driver. */
        hr_dev->reg_base = handle->rinfo.roce_io_base;
        hr_dev->caps.num_ports = 1;
        hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
        hr_dev->iboe.phy_port[0] = 0;

        /* cmd issue mode: 0 is poll, 1 is event */
        hr_dev->cmd_mod = 0;
        hr_dev->loop_idc = 0;

        return 0;
}

static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
        struct hns_roce_dev *hr_dev;
        int ret;

        hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
        if (!hr_dev)
                return -ENOMEM;

        hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
        if (!hr_dev->priv) {
                ret = -ENOMEM;
                goto error_failed_kzalloc;
        }

        hr_dev->pci_dev = handle->pdev;
        hr_dev->dev = &handle->pdev->dev;
        handle->priv = hr_dev;

        ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
        if (ret) {
                dev_err(hr_dev->dev, "Get configuration failed!\n");
                goto error_failed_get_cfg;
        }

        ret = hns_roce_init(hr_dev);
        if (ret) {
                dev_err(hr_dev->dev, "RoCE engine init failed!\n");
                goto error_failed_get_cfg;
        }

        return 0;

error_failed_get_cfg:
        kfree(hr_dev->priv);

error_failed_kzalloc:
        ib_dealloc_device(&hr_dev->ib_dev);

        return ret;
}

static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
                                           bool reset)
{
        struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;

        hns_roce_exit(hr_dev);
        kfree(hr_dev->priv);
        ib_dealloc_device(&hr_dev->ib_dev);
}

static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
        .init_instance = hns_roce_hw_v2_init_instance,
        .uninit_instance = hns_roce_hw_v2_uninit_instance,
};

static struct hnae3_client hns_roce_hw_v2_client = {
        .name = "hns_roce_hw_v2",
        .type = HNAE3_CLIENT_ROCE,
        .ops = &hns_roce_hw_v2_ops,
};

static int __init hns_roce_hw_v2_init(void)
{
        return hnae3_register_client(&hns_roce_hw_v2_client);
}

static void __exit hns_roce_hw_v2_exit(void)
{
        hnae3_unregister_client(&hns_roce_hw_v2_client);
}

module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");