drivers/net/ethernet/qlogic/qed/qed_roce.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/if_vlan.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include <linux/qed/qed_rdma_if.h>
#include "qed_rdma.h"
#include "qed_roce.h"
#include "qed_sp.h"

static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);

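/* EQ async-event dispatch for RoCE. DESTROY_QP_DONE events release the
 * "real" icid pair below; SRQ empty/limit events hand the SRQ index to the
 * upper layer (e.g. qedr); everything else forwards the raw async handle.
 */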
static int
qed_roce_async_event(struct qed_hwfn *p_hwfn,
		     u8 fw_event_code,
		     u16 echo, union event_ring_data *data, u8 fw_return_code)
{
	struct qed_rdma_events events = p_hwfn->p_rdma_info->events;

	if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
		u16 icid =
		    (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid);

		/* icid release in this async event can occur only if the icid
		 * was offloaded to the FW. In case it wasn't offloaded this is
		 * handled in qed_roce_sp_destroy_qp.
		 */
		qed_roce_free_real_icid(p_hwfn, icid);
	} else {
		if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
		    fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
			/* The SRQ index arrives in the low dword of the
			 * async handle.
			 */
			u16 srq_id =
			    (u16)le32_to_cpu(data->rdma_data.async_handle.lo);

			events.affiliated_event(events.context, fw_event_code,
						&srq_id);
		} else {
			union rdma_eqe_data rdata = data->rdma_data;

			events.affiliated_event(events.context, fw_event_code,
						(void *)&rdata.async_handle);
		}
	}

	return 0;
}

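/* Bounded drain: polls the real-cid bitmap every 100 ms, giving pending
 * async destroy-QP completions roughly 20 * 100 ms = 2 s before giving up.
 */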
void qed_roce_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
	int wait_count = 0;

	/* when destroying a RoCE QP the control is returned to the user after
	 * the synchronous part. The asynchronous part may take a little
	 * longer. We delay for a short while if an async destroy QP is still
	 * expected. Beyond the added delay we clear the bitmap anyway.
	 */
	while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
		msleep(100);
		if (wait_count++ > 20) {
			DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
			break;
		}
	}
}

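/* Convert the QP's GIDs into the FW ramrod layout. For RoCE v2 over IPv4
 * only the top dword carries the address; e.g. sgid.ipv4_addr = 192.0.2.1
 * ends up in src_gid[3] with src_gid[0..2] zeroed.
 */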
static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
			       __le32 *dst_gid)
{
	u32 i;

	if (qp->roce_mode == ROCE_V2_IPV4) {
		/* The IPv4 addresses shall be aligned to the highest word.
		 * The lower words must be zero.
		 */
		memset(src_gid, 0, sizeof(union qed_gid));
		memset(dst_gid, 0, sizeof(union qed_gid));
		src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
		dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
	} else {
		/* GIDs and IPv6 addresses coincide in location and size */
		for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
			src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
			dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
		}
	}
}

static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
	switch (roce_mode) {
	case ROCE_V1:
		return PLAIN_ROCE;
	case ROCE_V2_IPV4:
		return RROCE_IPV4;
	case ROCE_V2_IPV6:
		return RROCE_IPV6;
	default:
		return MAX_ROCE_FLAVOR;
	}
}

static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
{
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

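/* Allocate an adjacent (responder, requester) icid pair from cid_map and
 * back both with ILT context. The even id serves the responder and the odd
 * id (responder + 1) the requester; only the responder icid is returned,
 * e.g. an allocation of 6/7 yields *cid = 6 and the requester implicitly
 * uses 7.
 */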
int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 responder_icid;
	u32 requester_icid;
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &responder_icid);
	if (rc) {
		spin_unlock_bh(&p_rdma_info->lock);
		return rc;
	}

	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	if (rc)
		goto err;

	/* the two icids should be adjacent */
	if ((requester_icid - responder_icid) != 1) {
		DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's\n");
		rc = -EINVAL;
		goto err;
	}

	responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);
	requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);

	/* If these icids require a new ILT line, allocate a DMA-able context
	 * for an ILT page.
	 */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
	if (rc)
		goto err;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
	if (rc)
		goto err;

	*cid = (u16)responder_icid;
	return rc;

err:
	spin_lock_bh(&p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "Allocate CID - failed, rc = %d\n", rc);
	return rc;
}

static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

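/* Map a QP to a traffic class: take the PCP bits of its VLAN tag and ask
 * DCBX for the matching TC. Untagged QPs fall back to TC 0.
 */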
static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	u8 pri, tc = 0;

	if (qp->vlan_id) {
		pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		tc = qed_dcbx_get_priority_tc(p_hwfn, pri);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "qp icid %u tc: %u (vlan priority %s)\n",
		   qp->icid, tc, qp->vlan_id ? "enabled" : "disabled");

	return tc;
}

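/* Offload the responder side of a QP to the FW: allocate the IRQ ring,
 * build a ROCE_RAMROD_CREATE_QP ramrod on the slow path queue and post it
 * in EBLOCK mode (the call sleeps until the FW completes it), then mark
 * the responder's real cid as in use.
 */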
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_resp_ramrod_data *p_ramrod;
	u16 regular_latency_queue, low_latency_queue;
	struct qed_sp_init_data init_data;
	enum roce_flavor roce_flavor;
	struct qed_spq_entry *p_ent;
	enum protocol_type proto;
	int rc;
	u8 tc;

	if (!qp->has_resp)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for IRQ (likely the responder's incoming
	 * read/atomic request ring, cf. max_ird - not an interrupt).
	 */
	qp->irq_num_pages = 1;
	qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->irq_phys_addr, GFP_KERNEL);
	if (!qp->irq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_resp;

	p_ramrod->flags = 0;

	roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
		  qed_rdma_is_xrc_qp(qp));

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->irq_num_pages = qp->irq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
				       qp->rq_cq_id);
	p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id);

	tc = qed_roce_get_qp_tc(p_hwfn, qp);
	regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
	low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "qp icid %u pqs: regular_latency %u low_latency %u\n",
		   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
		   low_latency_queue - CM_TX_PQ_BASE);
	p_ramrod->regular_latency_phy_queue =
	    cpu_to_le16(regular_latency_queue);
	p_ramrod->low_latency_phy_queue =
	    cpu_to_le16(low_latency_queue);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);

	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	qp->resp_offloaded = true;
	qp->cq_prod = 0;

	proto = p_hwfn->p_rdma_info->proto;
	qed_roce_set_real_cid(p_hwfn, qp->icid -
			      qed_cxt_get_proto_cid_start(p_hwfn, proto));

	return rc;

err:
	DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	return rc;
}

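/* Requester-side counterpart of the function above. Note it addresses the
 * FW with qp->icid + 1, the odd icid of the pair reserved by
 * qed_roce_alloc_cid() for the requester.
 */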
static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_req_ramrod_data *p_ramrod;
	u16 regular_latency_queue, low_latency_queue;
	struct qed_sp_init_data init_data;
	enum roce_flavor roce_flavor;
	struct qed_spq_entry *p_ent;
	enum protocol_type proto;
	int rc;
	u8 tc;

	if (!qp->has_req)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for ORQ (likely the requester's
	 * outstanding read/atomic request ring, cf. max_ord).
	 */
	qp->orq_num_pages = 1;
	qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->orq_phys_addr, GFP_KERNEL);
	if (!qp->orq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_req;

	p_ramrod->flags = 0;

	roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
		  qed_rdma_is_xrc_qp(qp));

	SET_FIELD(p_ramrod->flags2,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE, qp->edpm_mode);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->orq_num_pages = qp->orq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
	p_ramrod->cq_cid =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);

	tc = qed_roce_get_qp_tc(p_hwfn, qp);
	regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
	low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "qp icid %u pqs: regular_latency %u low_latency %u\n",
		   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
		   low_latency_queue - CM_TX_PQ_BASE);
	p_ramrod->regular_latency_phy_queue =
	    cpu_to_le16(regular_latency_queue);
	p_ramrod->low_latency_phy_queue =
	    cpu_to_le16(low_latency_queue);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	qp->req_offloaded = true;
	proto = p_hwfn->p_rdma_info->proto;
	qed_roce_set_real_cid(p_hwfn,
			      qp->icid + 1 -
			      qed_cxt_get_proto_cid_start(p_hwfn, proto));

	return rc;

err:
	DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);
	return rc;
}

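/* The two modify helpers below share one ROCE_EVENT_MODIFY_QP ramrod
 * format: each QED_*_MODIFY_QP_VALID_* bit in modify_flags sets a *_FLG
 * field telling the FW which attributes to apply; attributes without their
 * flag set keep their current FW values.
 */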
static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	if (!qp->has_resp)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !qp->resp_offloaded)
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;

	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
	return rc;
}

static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_sqd,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	if (!qp->has_req)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !(qp->req_offloaded))
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_req;

	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
		  qp->sqd_async);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
	return rc;
}

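/* Tear down the responder half. On success the FW reports the final CQ
 * producer in the ramrod output params; it is cached in qp->cq_prod and
 * echoed back through *cq_prod so the upper layer can flush the CQ
 * consistently.
 */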
static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp,
					    u32 *cq_prod)
{
	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc;

	if (!qp->has_resp) {
		*cq_prod = 0;
		return 0;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
	*cq_prod = qp->cq_prod;

	if (!qp->resp_offloaded) {
		/* If a responder was never offloaded, we need to free the
		 * cids allocated in create_qp, as a FW async event will
		 * never arrive.
		 */
		u32 cid;

		cid = qp->icid -
		      qed_cxt_get_proto_cid_start(p_hwfn,
						  p_hwfn->p_rdma_info->proto);
		qed_roce_free_cid_pair(p_hwfn, (u16)cid);

		return 0;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;

	p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					  sizeof(*p_ramrod_res),
					  &ramrod_res_phys, GFP_KERNEL);

	if (!p_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
			  rc);
		qed_sp_destroy_request(p_hwfn, p_ent);
		return rc;
	}

	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	*cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
	qp->cq_prod = *cq_prod;

	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	qp->resp_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct roce_destroy_qp_resp_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp)
{
	struct roce_destroy_qp_req_output_params *p_ramrod_res;
	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc = -ENOMEM;

	if (!qp->has_req)
		return 0;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->req_offloaded)
		return 0;

	p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					  sizeof(*p_ramrod_res),
					  &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy requester failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);

	qp->req_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

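/* Query runs in up to two phases: a responder ramrod retrieves the RQ PSN
 * and error flag, and, if the requester is offloaded too, a second ramrod
 * retrieves the SQ PSN plus error/draining flags. A QP that was never
 * offloaded is answered from the driver's cached state.
 */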
int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
		      struct qed_rdma_qp *qp,
		      struct qed_rdma_query_qp_out_params *out_params)
{
	struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
	struct roce_query_qp_req_output_params *p_req_ramrod_res;
	struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
	struct roce_query_qp_req_ramrod_data *p_req_ramrod;
	struct qed_sp_init_data init_data;
	dma_addr_t resp_ramrod_res_phys;
	dma_addr_t req_ramrod_res_phys;
	struct qed_spq_entry *p_ent;
	bool rq_err_state;
	bool sq_err_state;
	bool sq_draining;
	int rc = -ENOMEM;

	if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
		/* We can't send a ramrod to the FW since this QP wasn't
		 * offloaded to the FW yet; report the cached state instead.
		 */
		out_params->draining = false;
		out_params->rq_psn = qp->rq_psn;
		out_params->sq_psn = qp->sq_psn;
		out_params->state = qp->cur_state;

		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
		return 0;
	}

	if (!(qp->resp_offloaded)) {
		DP_NOTICE(p_hwfn,
			  "The responder's qp should be offloaded before requester's\n");
		return -EINVAL;
	}

	/* Send a query responder ramrod to FW to get RQ-PSN and state */
	p_resp_ramrod_res =
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(*p_resp_ramrod_res),
			       &resp_ramrod_res_phys, GFP_KERNEL);
	if (!p_resp_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_resp;

	p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
	DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_resp;

	out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
	rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags),
				 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);

	if (!(qp->req_offloaded)) {
		/* Don't send query qp for the requester */
		out_params->sq_psn = qp->sq_psn;
		out_params->draining = false;

		if (rq_err_state)
			qp->cur_state = QED_ROCE_QP_STATE_ERR;

		out_params->state = qp->cur_state;

		return 0;
	}

	/* Send a query requester ramrod to FW to get SQ-PSN and state */
	p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					      sizeof(*p_req_ramrod_res),
					      &req_ramrod_res_phys,
					      GFP_KERNEL);
	if (!p_req_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	init_data.cid = qp->icid + 1;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_req;

	p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
	DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_req;

	out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
	sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
				 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
	sq_draining =
	    GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
		      ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);

	out_params->draining = false;

	if (rq_err_state || sq_err_state)
		qp->cur_state = QED_ROCE_QP_STATE_ERR;
	else if (sq_draining)
		out_params->draining = true;
	out_params->state = qp->cur_state;

	return 0;

err_req:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);
	return rc;
err_resp:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);
	return rc;
}

int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	u32 cq_prod;
	int rc;

	/* Destroys the specified QP */
	if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
		DP_NOTICE(p_hwfn,
			  "QP must be in error, reset or init state before destroying it\n");
		return -EINVAL;
	}

	if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
						      &cq_prod);
		if (rc)
			return rc;

		/* Send destroy requester ramrod */
		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
		if (rc)
			return rc;
	}

	return 0;
}

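/* QP state machine driven by the verbs layer; prev_state -> cur_state
 * decides which ramrods to send:
 *
 *   INIT/RESET -> RTR   create responder
 *   RTR -> RTS          create requester, modify responder
 *   RTS -> RTS          modify responder + requester
 *   RTS -> SQD          modify requester (move_to_sqd)
 *   SQD -> SQD/RTS      modify responder + requester
 *   any -> ERR          modify both with move_to_err
 *   any -> RESET        destroy responder + requester
 */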
int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
		       struct qed_rdma_qp *qp,
		       enum qed_roce_qp_state prev_state,
		       struct qed_rdma_modify_qp_in_params *params)
{
	int rc = 0;

	/* Perform additional operations according to the current state and the
	 * next state
	 */
	if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
	     (prev_state == QED_ROCE_QP_STATE_RESET)) &&
	    (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
		/* Init->RTR or Reset->RTR */
		rc = qed_roce_sp_create_responder(p_hwfn, qp);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTR -> RTS */
		rc = qed_roce_sp_create_requester(p_hwfn, qp);
		if (rc)
			return rc;

		/* Send modify responder ramrod */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTS->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* RTS->SQD */
		rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* SQD->SQD */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* SQD->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);

		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
		/* ->ERR */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
						  params->modify_flags);
		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
		/* Any state -> RESET */
		u32 cq_prod;

		/* Send destroy responder ramrod */
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
						      qp,
						      &cq_prod);

		if (rc)
			return rc;

		qp->cq_prod = cq_prod;

		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
			   "Unhandled qp state transition\n");
	}

	return rc;
}

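/* Example of the icid pairing: relative cid 8 (responder) pairs with
 * xcid 9 (requester) and vice versa, since xcid = cid ^ 1 flips only the
 * low bit.
 */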
static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 start_cid, cid, xcid;

	/* an even icid belongs to a responder while an odd icid belongs to a
	 * requester. The 'cid' received as an input can be either. We
	 * calculate the "partner" icid and call it xcid. Only if both are
	 * free can the "cid" map be cleared.
	 */
	start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
	cid = icid - start_cid;
	xcid = cid ^ 1;

	spin_lock_bh(&p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
	if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
	}

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u8 val;

	/* if any QPs are already active, we want to disable DPM, since their
	 * context information contains information from before the latest DCBx
	 * update. Otherwise enable it.
	 */
	val = qed_rdma_allocated_qps(p_hwfn) ? true : false;
	p_hwfn->dcbx_no_edpm = (u8)val;

	qed_rdma_dpm_conf(p_hwfn, p_ptt);
}

int qed_roce_setup(struct qed_hwfn *p_hwfn)
{
	return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
					 qed_roce_async_event);
}

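/* HW bring-up: zero PRS_REG_ROCE_DEST_QP_MAX_PF, point parser RDMA searches
 * at the RoCE register, and set bit 0 of the light-L2 ethertype enable mask
 * (presumably the RoCE ethertype) so LL2 can pass RoCE frames. The even-cid
 * check guards the even/odd responder/requester pairing assumed throughout
 * this file.
 */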
int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ll2_ethertype_en;

	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en | 0x01));

	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
		DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
	return 0;
}