/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: RDMA Controller HW interface
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/prefetch.h>
#include <linux/delay.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_fp.h"
#include "qplib_tlv.h"

static void bnxt_qplib_service_creq(struct tasklet_struct *t);

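/* RCFW command flow:
 * - Slow path commands are posted to the CMDQ in 16B units and tagged
 *   with a cookie (cmdq->seq_num masked with RCFW_MAX_COOKIE_VALUE).
 * - The cookie indexes rcfw->crsqe_tbl[], which tracks the outstanding
 *   request until the corresponding CREQ completion arrives.
 * - Completions are reaped by bnxt_qplib_service_creq(), which copies
 *   the response for live waiters, clears crsqe->is_in_used and wakes
 *   up cmdq->waitq for non-blocking callers.
 */
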
/**
 * bnxt_qplib_map_rc - map return type based on opcode
 * @opcode: roce slow path opcode
 *
 * case #1
 * Firmware initiated error recovery is a safe state machine and the
 * driver can consider all the underlying rdma resources as freed.
 * In this state, it is safe to return success for opcodes related to
 * destroying rdma resources (like destroy qp, destroy cq etc.).
 *
 * case #2
 * If the driver detects a potential firmware stall, the state machine is
 * not safe and the driver cannot consider the underlying rdma resources
 * as freed.
 * In this state, it is not safe to return success for opcodes related to
 * destroying rdma resources (like destroy qp, destroy cq etc.).
 *
 * Scope of this helper function is only for case #1.
 *
 * Returns:
 * 0 to communicate success to the caller.
 * Non zero error code to communicate failure to the caller.
 */
static int bnxt_qplib_map_rc(u8 opcode)
{
	switch (opcode) {
	case CMDQ_BASE_OPCODE_DESTROY_QP:
	case CMDQ_BASE_OPCODE_DESTROY_SRQ:
	case CMDQ_BASE_OPCODE_DESTROY_CQ:
	case CMDQ_BASE_OPCODE_DEALLOCATE_KEY:
	case CMDQ_BASE_OPCODE_DEREGISTER_MR:
	case CMDQ_BASE_OPCODE_DELETE_GID:
	case CMDQ_BASE_OPCODE_DESTROY_QP1:
	case CMDQ_BASE_OPCODE_DESTROY_AH:
	case CMDQ_BASE_OPCODE_DEINITIALIZE_FW:
	case CMDQ_BASE_OPCODE_MODIFY_ROCE_CC:
	case CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE:
		return 0;
	default:
		return -ETIMEDOUT;
	}
}

/**
 * bnxt_re_is_fw_stalled - Check firmware health
 * @rcfw: rcfw channel instance of rdev
 * @cookie: cookie to track the command
 *
 * If the firmware has not responded to any rcfw command within
 * rcfw->max_timeout, consider the firmware as stalled.
 *
 * Returns:
 * 0 if firmware is responding
 * -ENODEV if firmware is not responding
 */
static int bnxt_re_is_fw_stalled(struct bnxt_qplib_rcfw *rcfw,
				 u16 cookie)
{
	struct bnxt_qplib_cmdq_ctx *cmdq;
	struct bnxt_qplib_crsqe *crsqe;

	crsqe = &rcfw->crsqe_tbl[cookie];
	cmdq = &rcfw->cmdq;

	if (time_after(jiffies, cmdq->last_seen +
		       (rcfw->max_timeout * HZ))) {
		dev_warn_ratelimited(&rcfw->pdev->dev,
				     "%s: FW STALL Detected. cmdq[%#x]=%#x waited (%d > %d) msec active %d ",
				     __func__, cookie, crsqe->opcode,
				     jiffies_to_msecs(jiffies - cmdq->last_seen),
				     rcfw->max_timeout * 1000,
				     crsqe->is_in_used);
		return -ENODEV;
	}

	return 0;
}

/**
 * __wait_for_resp - Wait for the response without holding the cpu context
 * @rcfw: rcfw channel instance of rdev
 * @cookie: cookie to track the command
 *
 * Wait for command completion in sleepable context.
 *
 * Returns:
 * 0 if the command is completed by firmware.
 * Non zero error code for the rest of the cases.
 */
static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	struct bnxt_qplib_cmdq_ctx *cmdq;
	struct bnxt_qplib_crsqe *crsqe;
	int ret;

	cmdq = &rcfw->cmdq;
	crsqe = &rcfw->crsqe_tbl[cookie];

	do {
		if (test_bit(ERR_DEVICE_DETACHED, &cmdq->flags))
			return bnxt_qplib_map_rc(crsqe->opcode);
		if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
			return -ETIMEDOUT;

		wait_event_timeout(cmdq->waitq,
				   !crsqe->is_in_used ||
				   test_bit(ERR_DEVICE_DETACHED, &cmdq->flags),
				   msecs_to_jiffies(rcfw->max_timeout * 1000));

		if (!crsqe->is_in_used)
			return 0;

		bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);

		if (!crsqe->is_in_used)
			return 0;

		ret = bnxt_re_is_fw_stalled(rcfw, cookie);
		if (ret)
			return ret;

	} while (true);
};

/**
 * __block_for_resp - hold the cpu context and wait for response
 * @rcfw: rcfw channel instance of rdev
 * @cookie: cookie to track the command
 *
 * This function will hold the cpu (non-sleepable context) and
 * wait for command completion. Maximum holding interval is 8 seconds.
 *
 * Returns:
 * -ETIMEDOUT if the command is not completed within the time interval.
 * 0 if the command is completed by firmware.
 */
static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_crsqe *crsqe;
	unsigned long issue_time = 0;

	issue_time = jiffies;
	crsqe = &rcfw->crsqe_tbl[cookie];

	do {
		if (test_bit(ERR_DEVICE_DETACHED, &cmdq->flags))
			return bnxt_qplib_map_rc(crsqe->opcode);
		if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
			return -ETIMEDOUT;

		udelay(1);

		bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);
		if (!crsqe->is_in_used)
			return 0;

	} while (time_before(jiffies, issue_time + (8 * HZ)));

	return -ETIMEDOUT;
};

/* __send_message_no_waiter - get cookie and post the message.
 * @rcfw: rcfw channel instance of rdev
 * @msg: qplib message internal
 *
 * This function just posts the command and does not wait for completion.
 * Current design of this function is -
 * the caller must hold the completion queue hwq->lock.
 * the caller must have used the existing completion and freed the resources.
 * this function will not check the queue full condition.
 * this function will explicitly set is_waiter_alive=false.
 * current use case is - send destroy_ah if create_ah returns
 * after the waiter of create_ah is lost. It can be extended for other
 * use cases as well.
 *
 * Returns: Nothing
 *
 */
static void __send_message_no_waiter(struct bnxt_qplib_rcfw *rcfw,
				     struct bnxt_qplib_cmdqmsg *msg)
{
	struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_hwq *hwq = &cmdq->hwq;
	struct bnxt_qplib_crsqe *crsqe;
	struct bnxt_qplib_cmdqe *cmdqe;
	u32 sw_prod, cmdq_prod;
	u16 cookie;
	u32 bsize;
	u8 *preq;

	cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
	__set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie));
	crsqe = &rcfw->crsqe_tbl[cookie];

	/* Set cmd_size in terms of 16B slots in req. */
	bsize = bnxt_qplib_set_cmd_slots(msg->req);
	/* GET_CMD_SIZE would return number of slots in either case of tlv
	 * and non-tlv commands after call to bnxt_qplib_set_cmd_slots()
	 */
	crsqe->is_internal_cmd = true;
	crsqe->is_waiter_alive = false;
	crsqe->is_in_used = true;
	crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz);

	preq = (u8 *)msg->req;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(hwq->prod, hwq);
		cmdqe = bnxt_qplib_get_qe(hwq, sw_prod, NULL);
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe)));
		preq += min_t(u32, bsize, sizeof(*cmdqe));
		bsize -= min_t(u32, bsize, sizeof(*cmdqe));
		hwq->prod++;
	} while (bsize > 0);
	cmdq->seq_num++;

	cmdq_prod = hwq->prod;
	atomic_inc(&rcfw->timeout_send);
	/* ring CMDQ DB */
	wmb();
	writel(cmdq_prod, cmdq->cmdq_mbox.prod);
	writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
}

static int __send_message(struct bnxt_qplib_rcfw *rcfw,
			  struct bnxt_qplib_cmdqmsg *msg, u8 opcode)
{
	u32 bsize, free_slots, required_slots;
	struct bnxt_qplib_cmdq_ctx *cmdq;
	struct bnxt_qplib_crsqe *crsqe;
	struct bnxt_qplib_cmdqe *cmdqe;
	struct bnxt_qplib_hwq *hwq;
	u32 sw_prod, cmdq_prod;
	struct pci_dev *pdev;
	unsigned long flags;
	u16 cookie;
	u8 *preq;

	cmdq = &rcfw->cmdq;
	hwq = &cmdq->hwq;
	pdev = rcfw->pdev;

	/* Cmdq are in 16-byte units, each request can consume 1 or more
	 * cmdqe
	 */
	spin_lock_irqsave(&hwq->lock, flags);
	required_slots = bnxt_qplib_get_cmd_slots(msg->req);
	free_slots = HWQ_FREE_SLOTS(hwq);
	cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
	crsqe = &rcfw->crsqe_tbl[cookie];

	if (required_slots >= free_slots) {
		dev_info_ratelimited(&pdev->dev,
				     "CMDQ is full req/free %d/%d!",
				     required_slots, free_slots);
		spin_unlock_irqrestore(&hwq->lock, flags);
		return -EAGAIN;
	}
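	/* The blocking hint is encoded into the cookie (RCFW_CMD_IS_BLOCKING,
	 * outside the RCFW_MAX_COOKIE_VALUE mask) so that the completion
	 * handler can tell whether it must wake up cmdq->waitq for this
	 * command or whether the caller is busy-polling for it.
	 */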
	if (msg->block)
		cookie |= RCFW_CMD_IS_BLOCKING;
	__set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie));

	bsize = bnxt_qplib_set_cmd_slots(msg->req);
	crsqe->free_slots = free_slots;
	crsqe->resp = (struct creq_qp_event *)msg->resp;
	crsqe->resp->cookie = cpu_to_le16(cookie);
	crsqe->is_internal_cmd = false;
	crsqe->is_waiter_alive = true;
	crsqe->is_in_used = true;
	crsqe->opcode = opcode;

	crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz);
	if (__get_cmdq_base_resp_size(msg->req, msg->req_sz) && msg->sb) {
		struct bnxt_qplib_rcfw_sbuf *sbuf = msg->sb;

		__set_cmdq_base_resp_addr(msg->req, msg->req_sz,
					  cpu_to_le64(sbuf->dma_addr));
		__set_cmdq_base_resp_size(msg->req, msg->req_sz,
					  ALIGN(sbuf->size,
						BNXT_QPLIB_CMDQE_UNITS) /
					  BNXT_QPLIB_CMDQE_UNITS);
	}

	preq = (u8 *)msg->req;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(hwq->prod, hwq);
		cmdqe = bnxt_qplib_get_qe(hwq, sw_prod, NULL);
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe)));
		preq += min_t(u32, bsize, sizeof(*cmdqe));
		bsize -= min_t(u32, bsize, sizeof(*cmdqe));
		hwq->prod++;
	} while (bsize > 0);
	cmdq->seq_num++;

	cmdq_prod = hwq->prod & 0xFFFF;
	if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) {
		/* The very first doorbell write
		 * is required to set this flag
		 * which prompts the FW to reset
		 * its internal pointers
		 */
		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
		clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
	}
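	/* The wmb() below orders the memcpy of the command into the CMDQ
	 * before the MMIO writes that update the producer index and trigger
	 * the doorbell, so the firmware never sees a stale command body.
	 */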
	/* ring CMDQ DB */
	wmb();
	writel(cmdq_prod, cmdq->cmdq_mbox.prod);
	writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
	spin_unlock_irqrestore(&hwq->lock, flags);
	/* Return the CREQ response pointer */
	return 0;
}

/**
 * __poll_for_resp - self poll completion for rcfw command
 * @rcfw: rcfw channel instance of rdev
 * @cookie: cookie to track the command
 *
 * It works the same as __wait_for_resp except this function will
 * do self polling in short intervals since the interrupt is disabled.
 * This function can not be called from non-sleepable context.
 *
 * Returns:
 * -ETIMEDOUT if the command is not completed within the time interval.
 * 0 if the command is completed by firmware.
 */
static int __poll_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{
	struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_crsqe *crsqe;
	unsigned long issue_time;
	int ret;

	issue_time = jiffies;
	crsqe = &rcfw->crsqe_tbl[cookie];

	do {
		if (test_bit(ERR_DEVICE_DETACHED, &cmdq->flags))
			return bnxt_qplib_map_rc(crsqe->opcode);
		if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
			return -ETIMEDOUT;

		usleep_range(1000, 1001);

		bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);
		if (!crsqe->is_in_used)
			return 0;
		if (jiffies_to_msecs(jiffies - issue_time) >
		    (rcfw->max_timeout * 1000)) {
			ret = bnxt_re_is_fw_stalled(rcfw, cookie);
			if (ret)
				return ret;
		}
	} while (true);
};

static int __send_message_basic_sanity(struct bnxt_qplib_rcfw *rcfw,
				       struct bnxt_qplib_cmdqmsg *msg,
				       u8 opcode)
{
	struct bnxt_qplib_cmdq_ctx *cmdq;

	cmdq = &rcfw->cmdq;

	/* Prevent posting if f/w is not in a state to process */
	if (test_bit(ERR_DEVICE_DETACHED, &rcfw->cmdq.flags))
		return bnxt_qplib_map_rc(opcode);
	if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
		return -ETIMEDOUT;

	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
		return -EINVAL;
	}

	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
	     opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW not initialized, reject opcode 0x%x",
			opcode);
		return -EOPNOTSUPP;
	}

	return 0;
}

/* This function just posts the command and does not wait for completion */
static void __destroy_timedout_ah(struct bnxt_qplib_rcfw *rcfw,
				  struct creq_create_ah_resp *create_ah_resp)
{
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_ah req = {};

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_AH,
				 sizeof(req));
	req.ah_cid = create_ah_resp->xid;
	msg.req = (struct cmdq_base *)&req;
	msg.req_sz = sizeof(req);
	__send_message_no_waiter(rcfw, &msg);
	dev_info_ratelimited(&rcfw->pdev->dev,
			     "From %s: ah_cid = %d timeout_send %d\n",
			     __func__, req.ah_cid,
			     atomic_read(&rcfw->timeout_send));
}

/**
 * __bnxt_qplib_rcfw_send_message - qplib interface to send
 * and complete rcfw command.
 * @rcfw: rcfw channel instance of rdev
 * @msg: qplib message internal
 *
 * This function does not account for the shadow queue depth. It will send
 * all commands unconditionally as long as the send queue is not full.
 *
 * Returns:
 * 0 if the command is completed by firmware.
 * Non zero if the command is not completed by firmware.
 */
static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
					  struct bnxt_qplib_cmdqmsg *msg)
{
	struct creq_qp_event *evnt = (struct creq_qp_event *)msg->resp;
	struct bnxt_qplib_crsqe *crsqe;
	unsigned long flags;
	u16 cookie;
	int rc;
	u8 opcode;

	opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz);

	rc = __send_message_basic_sanity(rcfw, msg, opcode);
	if (rc)
		return rc;

	rc = __send_message(rcfw, msg, opcode);
	if (rc)
		return rc;

	cookie = le16_to_cpu(__get_cmdq_base_cookie(msg->req, msg->req_sz))
				& RCFW_MAX_COOKIE_VALUE;

	if (msg->block)
		rc = __block_for_resp(rcfw, cookie);
	else if (atomic_read(&rcfw->rcfw_intr_enabled))
		rc = __wait_for_resp(rcfw, cookie);
	else
		rc = __poll_for_resp(rcfw, cookie);

	if (rc) {
		spin_lock_irqsave(&rcfw->cmdq.hwq.lock, flags);
		crsqe = &rcfw->crsqe_tbl[cookie];
		crsqe->is_waiter_alive = false;
		if (rc == -ENODEV)
			set_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags);
		spin_unlock_irqrestore(&rcfw->cmdq.hwq.lock, flags);
		return -ETIMEDOUT;
	}

	if (evnt->status) {
		/* failed with status */
		dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
			cookie, opcode, evnt->status);
		rc = -EFAULT;
	}

	return rc;
}

/**
 * bnxt_qplib_rcfw_send_message - qplib interface to send
 * and complete rcfw command.
 * @rcfw: rcfw channel instance of rdev
 * @msg: qplib message internal
 *
 * The driver interacts with firmware through the rcfw channel/slow path
 * in two ways.
 * a. Blocking rcfw command send. In this path, the driver cannot hold
 *    the context for a longer period since it is holding the cpu until
 *    the command is completed.
 * b. Non-blocking rcfw command send. In this path, the driver can hold the
 *    context for a longer period. There may be many pending commands waiting
 *    for completion because of the non-blocking nature.
 *
 * The driver will use a shadow queue depth. The current queue depth of 8K
 * (due to the size of rcfw messages there can be ~4K rcfw commands actually
 * outstanding) is not optimal for rcfw command processing in firmware.
 *
 * Restrict at max #RCFW_CMD_NON_BLOCKING_SHADOW_QD Non-Blocking rcfw commands.
 * Allow all blocking commands as long as the queue is not full.
 *
 * Returns:
 * 0 if the command is completed by firmware.
 * Non zero if the command is not completed by firmware.
 */
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				 struct bnxt_qplib_cmdqmsg *msg)
{
	int ret;

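	/* Non-blocking callers are throttled by the rcfw_inflight semaphore
	 * (initialized to RCFW_CMD_NON_BLOCKING_SHADOW_QD) so that firmware
	 * is not flooded with outstanding slow path commands.
	 */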
	if (!msg->block) {
		down(&rcfw->rcfw_inflight);
		ret = __bnxt_qplib_rcfw_send_message(rcfw, msg);
		up(&rcfw->rcfw_inflight);
	} else {
		ret = __bnxt_qplib_rcfw_send_message(rcfw, msg);
	}

	return ret;
}

/* Completions */
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
					 struct creq_func_event *func_event)
{
	int rc;

	switch (func_event->event) {
	case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
		/* SRQ ctx error, call srq_handler??
		 * But there's no SRQ handle!
		 */
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
		break;
	case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
		break;
	case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
		break;
	default:
		return -EINVAL;
	}

	rc = rcfw->creq.aeq_handler(rcfw, (void *)func_event, NULL);
	return rc;
}

static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
				       struct creq_qp_event *qp_event,
				       u32 *num_wait)
{
	struct creq_qp_error_notification *err_event;
	struct bnxt_qplib_hwq *hwq = &rcfw->cmdq.hwq;
	struct bnxt_qplib_crsqe *crsqe;
	u32 qp_id, tbl_indx, req_size;
	struct bnxt_qplib_qp *qp;
	u16 cookie, blocked = 0;
	bool is_waiter_alive;
	struct pci_dev *pdev;
	unsigned long flags;
	u32 wait_cmds = 0;
	int rc = 0;

	pdev = rcfw->pdev;
	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		err_event = (struct creq_qp_error_notification *)qp_event;
		qp_id = le32_to_cpu(err_event->xid);
		tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw);
		qp = rcfw->qp_tbl[tbl_indx].qp_handle;
		dev_dbg(&pdev->dev, "Received QP error notification\n");
		dev_dbg(&pdev->dev,
			"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
			qp_id, err_event->req_err_state_reason,
			err_event->res_err_state_reason);
		if (!qp)
			break;
		bnxt_qplib_mark_qp_error(qp);
		rc = rcfw->creq.aeq_handler(rcfw, qp_event, qp);
		break;
	default:
		/*
		 * Command Response
		 * cmdq->lock needs to be acquired to synchronize
		 * the command send and completion reaping. This function
		 * is always called with creq->lock held. Using
		 * the nested variant of spin_lock.
		 *
		 */

		spin_lock_irqsave_nested(&hwq->lock, flags,
					 SINGLE_DEPTH_NESTING);
		cookie = le16_to_cpu(qp_event->cookie);
		blocked = cookie & RCFW_CMD_IS_BLOCKING;
		cookie &= RCFW_MAX_COOKIE_VALUE;
		crsqe = &rcfw->crsqe_tbl[cookie];

		if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED,
				       &rcfw->cmdq.flags),
			      "QPLIB: Unresponsive rcfw channel detected.!!")) {
			dev_info(&pdev->dev,
				 "rcfw timedout: cookie = %#x, free_slots = %d",
				 cookie, crsqe->free_slots);
			spin_unlock_irqrestore(&hwq->lock, flags);
			return rc;
		}

		if (crsqe->is_internal_cmd && !qp_event->status)
			atomic_dec(&rcfw->timeout_send);

		if (crsqe->is_waiter_alive) {
			if (crsqe->resp) {
				memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
				/* Insert write memory barrier to ensure that
				 * response data is copied before clearing the
				 * flags
				 */
				smp_wmb();
			}
			if (!blocked)
				wait_cmds++;
		}

		req_size = crsqe->req_size;
		is_waiter_alive = crsqe->is_waiter_alive;

		crsqe->req_size = 0;
		if (!is_waiter_alive)
			crsqe->resp = NULL;

		crsqe->is_in_used = false;

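		/* Retire the slots consumed by this command so the CMDQ
		 * producer can reuse them for new requests.
		 */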
		hwq->cons += req_size;

		/* This is a case to handle below scenario -
		 * Create AH is completed successfully by firmware,
		 * but completion took more time and driver already lost
		 * the context of create_ah from caller.
		 * We have already returned failure for the create_ah verb,
		 * so let's destroy the same address vector since it is
		 * no longer used by the stack. We don't care about completion
		 * in __send_message_no_waiter.
		 * If destroy_ah fails in firmware, there will be an AH
		 * resource leak, which is a relatively non-critical and
		 * unlikely scenario. The current design is not to handle
		 * such a case.
		 */
		if (!is_waiter_alive && !qp_event->status &&
		    qp_event->event == CREQ_QP_EVENT_EVENT_CREATE_AH)
			__destroy_timedout_ah(rcfw,
					      (struct creq_create_ah_resp *)
					      qp_event);
		spin_unlock_irqrestore(&hwq->lock, flags);
	}
	*num_wait += wait_cmds;
	return rc;
}

/* SP - CREQ Completion handlers */
static void bnxt_qplib_service_creq(struct tasklet_struct *t)
{
	struct bnxt_qplib_rcfw *rcfw = from_tasklet(rcfw, t, creq.creq_tasklet);
	struct bnxt_qplib_creq_ctx *creq = &rcfw->creq;
	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
	struct bnxt_qplib_hwq *hwq = &creq->hwq;
	struct creq_base *creqe;
	u32 sw_cons, raw_cons;
	unsigned long flags;
	u32 num_wakeup = 0;

	/* Service the CREQ until budget is over */
	spin_lock_irqsave(&hwq->lock, flags);
	raw_cons = hwq->cons;
	while (budget > 0) {
		sw_cons = HWQ_CMP(raw_cons, hwq);
		creqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
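		/* Entry ownership follows the usual valid-bit scheme:
		 * CREQ_CMP_VALID compares the entry's valid bit with the
		 * consumer's wrap state to decide whether firmware has
		 * published this entry yet.
		 */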
		if (!CREQ_CMP_VALID(creqe, raw_cons, hwq->max_elements))
			break;
		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		rcfw->cmdq.last_seen = jiffies;

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			bnxt_qplib_process_qp_event
				(rcfw, (struct creq_qp_event *)creqe,
				 &num_wakeup);
			creq->stats.creq_qp_event_processed++;
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bnxt_qplib_process_func_event
			    (rcfw, (struct creq_func_event *)creqe))
				creq->stats.creq_func_event_processed++;
			else
				dev_warn(&rcfw->pdev->dev,
					 "aeqe:%#x Not handled\n", type);
			break;
		default:
			if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT)
				dev_warn(&rcfw->pdev->dev,
					 "creqe with event 0x%x not handled\n",
					 type);
			break;
		}
		raw_cons++;
		budget--;
	}

	if (hwq->cons != raw_cons) {
		hwq->cons = raw_cons;
		bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo,
				      rcfw->res->cctx, true);
	}
	spin_unlock_irqrestore(&hwq->lock, flags);
	if (num_wakeup)
		wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
}

static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
{
	struct bnxt_qplib_rcfw *rcfw = dev_instance;
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_qplib_hwq *hwq;
	u32 sw_cons;

	creq = &rcfw->creq;
	hwq = &creq->hwq;
	/* Prefetch the CREQ element */
	sw_cons = HWQ_CMP(hwq->cons, hwq);
	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

	tasklet_schedule(&creq->creq_tasklet);

	return IRQ_HANDLED;
}

/* RCFW */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{
	struct creq_deinitialize_fw_resp resp = {};
	struct cmdq_deinitialize_fw req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DEINITIALIZE_FW,
				 sizeof(req));
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL,
				sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
	return 0;
}

int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
			 struct bnxt_qplib_ctx *ctx, int is_virtfn)
{
	struct creq_initialize_fw_resp resp = {};
	struct cmdq_initialize_fw req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	u8 pgsz, lvl;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_INITIALIZE_FW,
				 sizeof(req));
	/* Supply (log-base-2-of-host-page-size - base-page-shift)
	 * to bono to adjust the doorbell page sizes.
	 */
	req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
					   RCFW_DBR_BASE_PAGE_SHIFT);
	/*
	 * Gen P5 devices don't require this allocation
	 * as the L2 driver does the same for RoCE also.
	 * Also, VFs need not set up the HW context area; the PF
	 * shall set up this area for the VF. Skipping the
	 * HW programming
	 */
	if (is_virtfn)
		goto skip_ctx_setup;
	if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
		goto config_vf_res;

	lvl = ctx->qpc_tbl.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->qpc_tbl);
	req.qpc_pg_size_qpc_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				   lvl;
	lvl = ctx->mrw_tbl.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->mrw_tbl);
	req.mrw_pg_size_mrw_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				   lvl;
	lvl = ctx->srqc_tbl.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->srqc_tbl);
	req.srq_pg_size_srq_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				   lvl;
	lvl = ctx->cq_tbl.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->cq_tbl);
	req.cq_pg_size_cq_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				 lvl;
	lvl = ctx->tim_tbl.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->tim_tbl);
	req.tim_pg_size_tim_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				   lvl;
	lvl = ctx->tqm_ctx.pde.level;
	pgsz = bnxt_qplib_base_pg_size(&ctx->tqm_ctx.pde);
	req.tqm_pg_size_tqm_lvl = (pgsz << CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
				   lvl;
	req.qpc_page_dir =
		cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.mrw_page_dir =
		cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.srq_page_dir =
		cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.cq_page_dir =
		cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tim_page_dir =
		cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
	req.tqm_page_dir =
		cpu_to_le64(ctx->tqm_ctx.pde.pbl[PBL_LVL_0].pg_map_arr[0]);

	req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
	req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
	req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
	req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);

config_vf_res:
	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

skip_ctx_setup:
	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;
	set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
	return 0;
}

void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	kfree(rcfw->qp_tbl);
	kfree(rcfw->crsqe_tbl);
	bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
	bnxt_qplib_free_hwq(rcfw->res, &rcfw->creq.hwq);
	rcfw->pdev = NULL;
}

int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
				  struct bnxt_qplib_rcfw *rcfw,
				  struct bnxt_qplib_ctx *ctx,
				  int qp_tbl_sz)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_cmdq_ctx *cmdq;
	struct bnxt_qplib_creq_ctx *creq;

	rcfw->pdev = res->pdev;
	cmdq = &rcfw->cmdq;
	creq = &rcfw->creq;
	rcfw->res = res;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;

	hwq_attr.sginfo = &sginfo;
	hwq_attr.res = rcfw->res;
	hwq_attr.depth = BNXT_QPLIB_CREQE_MAX_CNT;
	hwq_attr.stride = BNXT_QPLIB_CREQE_UNITS;
	hwq_attr.type = bnxt_qplib_get_hwq_type(res);

	if (bnxt_qplib_alloc_init_hwq(&creq->hwq, &hwq_attr)) {
		dev_err(&rcfw->pdev->dev,
			"HW channel CREQ allocation failed\n");
		goto fail;
	}

	rcfw->cmdq_depth = BNXT_QPLIB_CMDQE_MAX_CNT;

	sginfo.pgsize = bnxt_qplib_cmdqe_page_size(rcfw->cmdq_depth);
	hwq_attr.depth = rcfw->cmdq_depth & 0x7FFFFFFF;
	hwq_attr.stride = BNXT_QPLIB_CMDQE_UNITS;
	hwq_attr.type = HWQ_TYPE_CTX;
	if (bnxt_qplib_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) {
		dev_err(&rcfw->pdev->dev,
			"HW channel CMDQ allocation failed\n");
		goto fail;
	}

	rcfw->crsqe_tbl = kcalloc(cmdq->hwq.max_elements,
				  sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
	if (!rcfw->crsqe_tbl)
		goto fail;

	/* Allocate one extra to hold the QP1 entries */
	rcfw->qp_tbl_size = qp_tbl_sz + 1;
	rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
			       GFP_KERNEL);
	if (!rcfw->qp_tbl)
		goto fail;

	rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;

	return 0;

fail:
	bnxt_qplib_free_rcfw_channel(rcfw);
	return -ENOMEM;
}

void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
{
	struct bnxt_qplib_creq_ctx *creq;

	creq = &rcfw->creq;

	if (!creq->requested)
		return;

	creq->requested = false;
	/* Mask h/w interrupts */
	bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false);
	/* Sync with last running IRQ-handler */
	synchronize_irq(creq->msix_vec);
	free_irq(creq->msix_vec, rcfw);
	kfree(creq->irq_name);
	creq->irq_name = NULL;
	atomic_set(&rcfw->rcfw_intr_enabled, 0);
	if (kill)
		tasklet_kill(&creq->creq_tasklet);
	tasklet_disable(&creq->creq_tasklet);
}

void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_qplib_cmdq_ctx *cmdq;

	creq = &rcfw->creq;
	cmdq = &rcfw->cmdq;
	/* Make sure the HW channel is stopped! */
	bnxt_qplib_rcfw_stop_irq(rcfw, true);

	iounmap(cmdq->cmdq_mbox.reg.bar_reg);
	iounmap(creq->creq_db.reg.bar_reg);

	cmdq->cmdq_mbox.reg.bar_reg = NULL;
	creq->creq_db.reg.bar_reg = NULL;
	creq->aeq_handler = NULL;
	creq->msix_vec = 0;
}

int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
			      bool need_init)
{
	struct bnxt_qplib_creq_ctx *creq;
	struct bnxt_qplib_res *res;
	int rc;

	creq = &rcfw->creq;
	res = rcfw->res;

	if (creq->requested)
		return -EFAULT;

	creq->msix_vec = msix_vector;
	if (need_init)
		tasklet_setup(&creq->creq_tasklet, bnxt_qplib_service_creq);
	else
		tasklet_enable(&creq->creq_tasklet);

	creq->irq_name = kasprintf(GFP_KERNEL, "bnxt_re-creq@pci:%s",
				   pci_name(res->pdev));
	if (!creq->irq_name)
		return -ENOMEM;
	rc = request_irq(creq->msix_vec, bnxt_qplib_creq_irq, 0,
			 creq->irq_name, rcfw);
	if (rc) {
		kfree(creq->irq_name);
		creq->irq_name = NULL;
		tasklet_disable(&creq->creq_tasklet);
		return rc;
	}
	creq->requested = true;

	bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true);
	atomic_inc(&rcfw->rcfw_intr_enabled);

	return 0;
}

static int bnxt_qplib_map_cmdq_mbox(struct bnxt_qplib_rcfw *rcfw)
{
	struct bnxt_qplib_cmdq_mbox *mbox;
	resource_size_t bar_reg;
	struct pci_dev *pdev;

	pdev = rcfw->pdev;
	mbox = &rcfw->cmdq.cmdq_mbox;

	mbox->reg.bar_id = RCFW_COMM_PCI_BAR_REGION;
	mbox->reg.len = RCFW_COMM_SIZE;
	mbox->reg.bar_base = pci_resource_start(pdev, mbox->reg.bar_id);
	if (!mbox->reg.bar_base) {
		dev_err(&pdev->dev,
			"QPLIB: CMDQ BAR region %d resc start is 0!\n",
			mbox->reg.bar_id);
		return -ENOMEM;
	}

	bar_reg = mbox->reg.bar_base + RCFW_COMM_BASE_OFFSET;
	mbox->reg.len = RCFW_COMM_SIZE;
	mbox->reg.bar_reg = ioremap(bar_reg, mbox->reg.len);
	if (!mbox->reg.bar_reg) {
		dev_err(&pdev->dev,
			"QPLIB: CMDQ BAR region %d mapping failed\n",
			mbox->reg.bar_id);
		return -ENOMEM;
	}

	mbox->prod = (void __iomem *)(mbox->reg.bar_reg +
				      RCFW_PF_VF_COMM_PROD_OFFSET);
	mbox->db = (void __iomem *)(mbox->reg.bar_reg + RCFW_COMM_TRIG_OFFSET);
	return 0;
}

static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt)
{
	struct bnxt_qplib_creq_db *creq_db;
	resource_size_t bar_reg;
	struct pci_dev *pdev;

	pdev = rcfw->pdev;
	creq_db = &rcfw->creq.creq_db;

	creq_db->reg.bar_id = RCFW_COMM_CONS_PCI_BAR_REGION;
	creq_db->reg.bar_base = pci_resource_start(pdev, creq_db->reg.bar_id);
	if (!creq_db->reg.bar_id)
		dev_err(&pdev->dev,
			"QPLIB: CREQ BAR region %d resc start is 0!",
			creq_db->reg.bar_id);

	bar_reg = creq_db->reg.bar_base + reg_offt;
	/* Unconditionally map 8 bytes to support 57500 series */
	creq_db->reg.len = 8;
	creq_db->reg.bar_reg = ioremap(bar_reg, creq_db->reg.len);
	if (!creq_db->reg.bar_reg) {
		dev_err(&pdev->dev,
			"QPLIB: CREQ BAR region %d mapping failed",
			creq_db->reg.bar_id);
		return -ENOMEM;
	}
	creq_db->dbinfo.db = creq_db->reg.bar_reg;
	creq_db->dbinfo.hwq = &rcfw->creq.hwq;
	creq_db->dbinfo.xid = rcfw->creq.ring_id;
	return 0;
}

1138
1139static void bnxt_qplib_start_rcfw(struct bnxt_qplib_rcfw *rcfw)
1140{
1141 struct bnxt_qplib_cmdq_ctx *cmdq;
1142 struct bnxt_qplib_creq_ctx *creq;
1143 struct bnxt_qplib_cmdq_mbox *mbox;
1144 struct cmdq_init init = {0};
1145
1146 cmdq = &rcfw->cmdq;
1147 creq = &rcfw->creq;
1148 mbox = &cmdq->cmdq_mbox;
1149
1150 init.cmdq_pbl = cpu_to_le64(cmdq->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
1151 init.cmdq_size_cmdq_lvl =
1152 cpu_to_le16(((rcfw->cmdq_depth <<
1153 CMDQ_INIT_CMDQ_SIZE_SFT) &
1154 CMDQ_INIT_CMDQ_SIZE_MASK) |
1155 ((cmdq->hwq.level <<
1156 CMDQ_INIT_CMDQ_LVL_SFT) &
1157 CMDQ_INIT_CMDQ_LVL_MASK));
1158 init.creq_ring_id = cpu_to_le16(creq->ring_id);
1159 /* Write to the Bono mailbox register */
1160 __iowrite32_copy(mbox->reg.bar_reg, &init, sizeof(init) / 4);
1161}
1ac5a404 1162
int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
				   int msix_vector,
				   int cp_bar_reg_off,
				   aeq_handler_t aeq_handler)
{
	struct bnxt_qplib_cmdq_ctx *cmdq;
	struct bnxt_qplib_creq_ctx *creq;
	int rc;

	cmdq = &rcfw->cmdq;
	creq = &rcfw->creq;

	/* Clear to defaults */

	cmdq->seq_num = 0;
	set_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
	init_waitqueue_head(&cmdq->waitq);

	creq->stats.creq_qp_event_processed = 0;
	creq->stats.creq_func_event_processed = 0;
	creq->aeq_handler = aeq_handler;

	rc = bnxt_qplib_map_cmdq_mbox(rcfw);
	if (rc)
		return rc;

	rc = bnxt_qplib_map_creq_db(rcfw, cp_bar_reg_off);
	if (rc)
		return rc;

	rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
	if (rc) {
		dev_err(&rcfw->pdev->dev,
			"Failed to request IRQ for CREQ rc = 0x%x\n", rc);
		bnxt_qplib_disable_rcfw_channel(rcfw);
		return rc;
	}

	sema_init(&rcfw->rcfw_inflight, RCFW_CMD_NON_BLOCKING_SHADOW_QD);
	bnxt_qplib_start_rcfw(rcfw);

	return 0;
}