// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <net/mana/gdma.h>
#include <net/mana/hw_channel.h>
#include <linux/vmalloc.h>
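
/* Reserve a free in-flight message ID. The semaphore caps the number of
 * outstanding requests at the queue depth, so a zero bit is always found
 * once down() returns.
 */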
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;
	u32 index;

	down(&hwc->sema);

	spin_lock_irqsave(&r->lock, flags);
	index = find_first_zero_bit(hwc->inflight_msg_res.map,
				    hwc->inflight_msg_res.size);
	bitmap_set(hwc->inflight_msg_res.map, index, 1);
	spin_unlock_irqrestore(&r->lock, flags);

	*msg_id = index;
	return 0;
}
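
/* Release a message ID and wake up one waiter blocked in down(). */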
static void mana_hwc_put_msg_index(struct hw_channel_context *hwc, u16 msg_id)
{
	struct gdma_resource *r = &hwc->inflight_msg_res;
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	bitmap_clear(hwc->inflight_msg_res.map, msg_id, 1);
	spin_unlock_irqrestore(&r->lock, flags);

	up(&hwc->sema);
}
static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
				    const struct gdma_resp_hdr *resp_msg,
				    u32 resp_len)
{
	if (resp_len < sizeof(*resp_msg))
		return -EPROTO;
	if (resp_len > caller_ctx->output_buflen)
		return -EPROTO;
	return 0;
}
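
/* Match a received response to the caller context indexed by its message ID,
 * copy the message into the caller's output buffer, and wake the thread
 * waiting in mana_hwc_send_request().
 */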
static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
				 const struct gdma_resp_hdr *resp_msg)
{
	struct hwc_caller_ctx *ctx;
	int err;

	if (!test_bit(resp_msg->response.hwc_msg_id,
		      hwc->inflight_msg_res.map)) {
		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
			resp_msg->response.hwc_msg_id);
		return;
	}

	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
	if (err)
		goto out;

	ctx->status_code = resp_msg->status;
	memcpy(ctx->output_buf, resp_msg, resp_len);
out:
	ctx->error = err;
	complete(&ctx->comp_event);
}
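
/* (Re)post a receive buffer on the HWC RQ as a single-SGE work request. */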
static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
				struct hwc_work_request *req)
{
	struct device *dev = hwc_rxq->hwc->dev;
	struct gdma_sge *sge;
	int err;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_rxq->msg_buf->gpa_mkey;
	sge->size = req->buf_len;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_rxq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC RQ: %d\n", err);
	return err;
}
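
/* EQ callback used while the channel is being established: the SoC reports
 * queue IDs, the doorbell, size limits and the GPA memory key through HWC
 * init EQEs, then signals GDMA_EQE_HWC_INIT_DONE. Runtime reconfiguration
 * (e.g. a new HWC timeout) arrives through GDMA_EQE_HWC_SOC_RECONFIG_DATA.
 */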
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
					struct gdma_event *event)
{
	struct hw_channel_context *hwc = ctx;
	struct gdma_dev *gd = hwc->gdma_dev;
	union hwc_init_type_data type_data;
	union hwc_init_eq_id_db eq_db;
	u32 type, val;

	switch (event->type) {
	case GDMA_EQE_HWC_INIT_EQ_ID_DB:
		eq_db.as_uint32 = event->details[0];
		hwc->cq->gdma_eq->id = eq_db.eq_id;
		gd->doorbell = eq_db.doorbell;
		break;

	case GDMA_EQE_HWC_INIT_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_INIT_DATA_CQID:
			hwc->cq->gdma_cq->id = val;
			break;
		case HWC_INIT_DATA_RQID:
			hwc->rxq->gdma_wq->id = val;
			break;
		case HWC_INIT_DATA_SQID:
			hwc->txq->gdma_wq->id = val;
			break;
		case HWC_INIT_DATA_QUEUE_DEPTH:
			hwc->hwc_init_q_depth_max = (u16)val;
			break;
		case HWC_INIT_DATA_MAX_REQUEST:
			hwc->hwc_init_max_req_msg_size = val;
			break;
		case HWC_INIT_DATA_MAX_RESPONSE:
			hwc->hwc_init_max_resp_msg_size = val;
			break;
		case HWC_INIT_DATA_MAX_NUM_CQS:
			gd->gdma_context->max_num_cqs = val;
			break;
		case HWC_INIT_DATA_PDID:
			hwc->gdma_dev->pdid = val;
			break;
		case HWC_INIT_DATA_GPA_MKEY:
			hwc->rxq->msg_buf->gpa_mkey = val;
			hwc->txq->msg_buf->gpa_mkey = val;
			break;
		case HWC_INIT_DATA_PF_DEST_RQ_ID:
			hwc->pf_dest_vrq_id = val;
			break;
		case HWC_INIT_DATA_PF_DEST_CQ_ID:
			hwc->pf_dest_vrcq_id = val;
			break;
		}

		break;

	case GDMA_EQE_HWC_INIT_DONE:
		complete(&hwc->hwc_init_eqe_comp);
		break;

	case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
		type_data.as_uint32 = event->details[0];
		type = type_data.type;
		val = type_data.value;

		switch (type) {
		case HWC_DATA_CFG_HWC_TIMEOUT:
			hwc->hwc_timeout = val;
			break;
		default:
			dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type);
			break;
		}

		break;

	default:
		dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
		/* Ignore unknown events, which should never happen. */
		break;
	}
}
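
/* RQ completion handler: locate the work request that owns the received
 * buffer, hand the response to mana_hwc_handle_resp(), then repost the
 * buffer to the hardware.
 */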
static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *rx_req;
	struct gdma_resp_hdr *resp;
	struct gdma_wqe *dma_oob;
	struct gdma_queue *rq;
	struct gdma_sge *sge;
	u64 rq_base_addr;
	u64 rx_req_idx;
	u8 *wqe;

	if (WARN_ON_ONCE(hwc_rxq->gdma_wq->id != gdma_rxq_id))
		return;

	rq = hwc_rxq->gdma_wq;
	wqe = mana_gd_get_wqe_ptr(rq, rx_oob->wqe_offset / GDMA_WQE_BU_SIZE);
	dma_oob = (struct gdma_wqe *)wqe;

	sge = (struct gdma_sge *)(wqe + 8 + dma_oob->inline_oob_size_div4 * 4);

	/* Select the RX work request for virtual address and for reposting. */
	rq_base_addr = hwc_rxq->msg_buf->mem_info.dma_handle;
	rx_req_idx = (sge->address - rq_base_addr) / hwc->max_req_msg_size;

	rx_req = &hwc_rxq->msg_buf->reqs[rx_req_idx];
	resp = (struct gdma_resp_hdr *)rx_req->buf_va;

	if (resp->response.hwc_msg_id >= hwc->num_inflight_msg) {
		dev_err(hwc->dev, "HWC RX: wrong msg_id=%u\n",
			resp->response.hwc_msg_id);
		return;
	}

	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);

	/* Can no longer use 'resp', because the buffer is posted to the HW
	 * in mana_hwc_post_rx_wqe() below.
	 */
	resp = NULL;

	mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
}
static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
				      const struct hwc_rx_oob *rx_oob)
{
	struct hw_channel_context *hwc = ctx;
	struct hwc_wq *hwc_txq = hwc->txq;

	WARN_ON_ONCE(!hwc_txq || hwc_txq->gdma_wq->id != gdma_txq_id);
}
static int mana_hwc_create_gdma_wq(struct hw_channel_context *hwc,
				   enum gdma_queue_type type, u64 queue_size,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	if (type != GDMA_SQ && type != GDMA_RQ)
		return -EINVAL;

	spec.type = type;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}
static int mana_hwc_create_gdma_cq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_cq_callback *cb,
				   struct gdma_queue *parent_eq,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_CQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.cq.context = ctx;
	spec.cq.callback = cb;
	spec.cq.parent_eq = parent_eq;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}
static int mana_hwc_create_gdma_eq(struct hw_channel_context *hwc,
				   u64 queue_size,
				   void *ctx, gdma_eq_callback *cb,
				   struct gdma_queue **queue)
{
	struct gdma_queue_spec spec = {};

	spec.type = GDMA_EQ;
	spec.monitor_avl_buf = false;
	spec.queue_size = queue_size;
	spec.eq.context = ctx;
	spec.eq.callback = cb;
	spec.eq.log2_throttle_limit = DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ;
	spec.eq.msix_index = 0;

	return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
}
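
/* CQ callback shared by the HWC SQ and RQ: drain the completion queue,
 * dispatch each completion to the TX or RX handler, then re-arm the CQ.
 */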
static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
{
	struct hwc_rx_oob comp_data = {};
	struct gdma_comp *completions;
	struct hwc_cq *hwc_cq = ctx;
	int comp_read, i;

	WARN_ON_ONCE(hwc_cq->gdma_cq != q_self);

	completions = hwc_cq->comp_buf;
	comp_read = mana_gd_poll_cq(q_self, completions, hwc_cq->queue_depth);
	WARN_ON_ONCE(comp_read <= 0 || comp_read > hwc_cq->queue_depth);

	for (i = 0; i < comp_read; ++i) {
		comp_data = *(struct hwc_rx_oob *)completions[i].cqe_data;

		if (completions[i].is_sq)
			hwc_cq->tx_event_handler(hwc_cq->tx_event_ctx,
						 completions[i].wq_num,
						 &comp_data);
		else
			hwc_cq->rx_event_handler(hwc_cq->rx_event_ctx,
						 completions[i].wq_num,
						 &comp_data);
	}

	mana_gd_ring_cq(q_self, SET_ARM_BIT);
}
static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
	kfree(hwc_cq->comp_buf);
	if (hwc_cq->gdma_cq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_cq);
	if (hwc_cq->gdma_eq)
		mana_gd_destroy_queue(gc, hwc_cq->gdma_eq);
	kfree(hwc_cq);
}
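
/* Create the HWC completion path: an EQ, a CQ bound to it, and a completion
 * buffer sized for q_depth entries.
 */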
static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
			      gdma_eq_callback *callback, void *ctx,
			      hwc_rx_event_handler_t *rx_ev_hdlr,
			      void *rx_ev_ctx,
			      hwc_tx_event_handler_t *tx_ev_hdlr,
			      void *tx_ev_ctx, struct hwc_cq **hwc_cq_ptr)
{
	struct gdma_queue *eq, *cq;
	struct gdma_comp *comp_buf;
	struct hwc_cq *hwc_cq;
	u32 eq_size, cq_size;
	int err;

	eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
	if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
	if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
	if (!hwc_cq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC EQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_eq = eq;

	err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,
				      eq, &cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ for RQ: %d\n", err);
		goto out;
	}
	hwc_cq->gdma_cq = cq;

	comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
	if (!comp_buf) {
		err = -ENOMEM;
		goto out;
	}

	hwc_cq->comp_buf = comp_buf;
	hwc_cq->queue_depth = q_depth;
	hwc_cq->rx_event_handler = rx_ev_hdlr;
	hwc_cq->rx_event_ctx = rx_ev_ctx;
	hwc_cq->tx_event_handler = tx_ev_hdlr;
	hwc_cq->tx_event_ctx = tx_ev_ctx;

	*hwc_cq_ptr = hwc_cq;
	return 0;
out:
	mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
	return err;
}
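
/* Allocate one DMA buffer and carve it into q_depth fixed-size message
 * slots, one per work request.
 */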
static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
				  u32 max_msg_size,
				  struct hwc_dma_buf **dma_buf_ptr)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *hwc_wr;
	struct hwc_dma_buf *dma_buf;
	struct gdma_mem_info *gmi;
	void *virt_addr;
	u32 buf_size;
	u8 *base_pa;
	int err;
	u16 i;

	dma_buf = kzalloc(struct_size(dma_buf, reqs, q_depth), GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	dma_buf->num_reqs = q_depth;
	buf_size = PAGE_ALIGN(q_depth * max_msg_size);

	gmi = &dma_buf->mem_info;
	err = mana_gd_alloc_memory(gc, buf_size, gmi);
	if (err) {
		dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
		goto out;
	}

	virt_addr = dma_buf->mem_info.virt_addr;
	base_pa = (u8 *)dma_buf->mem_info.dma_handle;

	for (i = 0; i < q_depth; i++) {
		hwc_wr = &dma_buf->reqs[i];
		hwc_wr->buf_va = virt_addr + i * max_msg_size;
		hwc_wr->buf_sge_addr = base_pa + i * max_msg_size;
		hwc_wr->buf_len = max_msg_size;
	}

	*dma_buf_ptr = dma_buf;
	return 0;
out:
	kfree(dma_buf);
	return err;
}
static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
				     struct hwc_dma_buf *dma_buf)
{
	if (!dma_buf)
		return;
	mana_gd_free_memory(&dma_buf->mem_info);
	kfree(dma_buf);
}
static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
				struct hwc_wq *hwc_wq)
{
	mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);
	if (hwc_wq->gdma_wq)
		mana_gd_destroy_queue(hwc->gdma_dev->gdma_context,
				      hwc_wq->gdma_wq);
	kfree(hwc_wq);
}
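
/* Create an HWC work queue (SQ or RQ) together with the DMA message buffer
 * backing its work requests.
 */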
static int mana_hwc_create_wq(struct hw_channel_context *hwc,
			      enum gdma_queue_type q_type, u16 q_depth,
			      u32 max_msg_size, struct hwc_cq *hwc_cq,
			      struct hwc_wq **hwc_wq_ptr)
{
	struct gdma_queue *queue;
	struct hwc_wq *hwc_wq;
	u32 queue_size;
	int err;

	WARN_ON(q_type != GDMA_SQ && q_type != GDMA_RQ);

	if (q_type == GDMA_RQ)
		queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);
	else
		queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

	if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
		queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;

	hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
	if (!hwc_wq)
		return -ENOMEM;

	err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
	if (err)
		goto out;

	hwc_wq->hwc = hwc;
	hwc_wq->gdma_wq = queue;
	hwc_wq->queue_depth = q_depth;
	hwc_wq->hwc_cq = hwc_cq;

	err = mana_hwc_alloc_dma_buf(hwc, q_depth, max_msg_size,
				     &hwc_wq->msg_buf);
	if (err)
		goto out;

	*hwc_wq_ptr = hwc_wq;
	return 0;
out:
	mana_hwc_destroy_wq(hwc, hwc_wq);
	return err;
}
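
/* Post a request message on the HWC SQ. The inline TX OOB carries the
 * destination virtual RQ/RCQ IDs and the source SQ/SCQ IDs.
 */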
static int mana_hwc_post_tx_wqe(const struct hwc_wq *hwc_txq,
				struct hwc_work_request *req,
				u32 dest_virt_rq_id, u32 dest_virt_rcq_id,
				bool dest_pf)
{
	struct device *dev = hwc_txq->hwc->dev;
	struct hwc_tx_oob *tx_oob;
	struct gdma_sge *sge;
	int err;

	if (req->msg_size == 0 || req->msg_size > req->buf_len) {
		dev_err(dev, "wrong msg_size: %u, buf_len: %u\n",
			req->msg_size, req->buf_len);
		return -EINVAL;
	}

	tx_oob = &req->tx_oob;

	tx_oob->vrq_id = dest_virt_rq_id;
	tx_oob->dest_vfid = 0;
	tx_oob->vrcq_id = dest_virt_rcq_id;
	tx_oob->vscq_id = hwc_txq->hwc_cq->gdma_cq->id;
	tx_oob->loopback = false;
	tx_oob->lso_override = false;
	tx_oob->dest_pf = dest_pf;
	tx_oob->vsq_id = hwc_txq->gdma_wq->id;

	sge = &req->sge;
	sge->address = (u64)req->buf_sge_addr;
	sge->mem_key = hwc_txq->msg_buf->gpa_mkey;
	sge->size = req->msg_size;

	memset(&req->wqe_req, 0, sizeof(struct gdma_wqe_request));
	req->wqe_req.sgl = sge;
	req->wqe_req.num_sge = 1;
	req->wqe_req.inline_oob_size = sizeof(struct hwc_tx_oob);
	req->wqe_req.inline_oob_data = tx_oob;
	req->wqe_req.client_data_unit = 0;

	err = mana_gd_post_and_ring(hwc_txq->gdma_wq, &req->wqe_req, NULL);
	if (err)
		dev_err(dev, "Failed to post WQE on HWC SQ: %d\n", err);
	return err;
}
static int mana_hwc_init_inflight_msg(struct hw_channel_context *hwc,
				      u16 num_msg)
{
	int err;

	sema_init(&hwc->sema, num_msg);
	err = mana_gd_alloc_res_map(num_msg, &hwc->inflight_msg_res);
	if (err)
		dev_err(hwc->dev, "Failed to init inflight_msg_res: %d\n", err);
	return err;
}
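
/* Prime the RQ with receive WQEs, allocate the per-message caller contexts,
 * and verify that the EQ delivers interrupts before the channel is used.
 */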
static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
				 u32 max_req_msg_size, u32 max_resp_msg_size)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_wq *hwc_rxq = hwc->rxq;
	struct hwc_work_request *req;
	struct hwc_caller_ctx *ctx;
	int err;
	int i;

	/* Post all WQEs on the RQ */
	for (i = 0; i < q_depth; i++) {
		req = &hwc_rxq->msg_buf->reqs[i];
		err = mana_hwc_post_rx_wqe(hwc_rxq, req);
		if (err)
			return err;
	}

	ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	for (i = 0; i < q_depth; ++i)
		init_completion(&ctx[i].comp_event);

	hwc->caller_ctx = ctx;

	return mana_gd_test_eq(gc, hwc->cq->gdma_eq);
}
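
/* Hand the queue addresses to the SoC over the shared-memory channel and
 * wait for the init EQEs that report the negotiated queue depth and message
 * size limits.
 */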
static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
				      u32 *max_req_msg_size,
				      u32 *max_resp_msg_size)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;
	struct gdma_queue *rq = hwc->rxq->gdma_wq;
	struct gdma_queue *sq = hwc->txq->gdma_wq;
	struct gdma_queue *eq = hwc->cq->gdma_eq;
	struct gdma_queue *cq = hwc->cq->gdma_cq;
	int err;

	init_completion(&hwc->hwc_init_eqe_comp);

	err = mana_smc_setup_hwc(&gc->shm_channel, false,
				 eq->mem_info.dma_handle,
				 cq->mem_info.dma_handle,
				 rq->mem_info.dma_handle,
				 sq->mem_info.dma_handle,
				 eq->eq.msix_index);
	if (err)
		return err;

	if (!wait_for_completion_timeout(&hwc->hwc_init_eqe_comp, 60 * HZ))
		return -ETIMEDOUT;

	*q_depth = hwc->hwc_init_q_depth_max;
	*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
	*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;

	/* Both were set in mana_hwc_init_event_handler(). */
	if (WARN_ON(cq->id >= gc->max_num_cqs))
		return -EPROTO;

	gc->cq_table = vcalloc(gc->max_num_cqs, sizeof(struct gdma_queue *));
	if (!gc->cq_table)
		return -ENOMEM;

	gc->cq_table[cq->id] = cq;

	return 0;
}
static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
				u32 max_req_msg_size, u32 max_resp_msg_size)
{
	int err;

	err = mana_hwc_init_inflight_msg(hwc, q_depth);
	if (err)
		return err;

	/* CQ is shared by SQ and RQ, so CQ's queue depth is the sum of SQ
	 * queue depth and RQ queue depth.
	 */
	err = mana_hwc_create_cq(hwc, q_depth * 2,
				 mana_hwc_init_event_handler, hwc,
				 mana_hwc_rx_event_handler, hwc,
				 mana_hwc_tx_event_handler, hwc, &hwc->cq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
				 hwc->cq, &hwc->rxq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
		goto out;
	}

	err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
				 hwc->cq, &hwc->txq);
	if (err) {
		dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
		goto out;
	}

	hwc->num_inflight_msg = q_depth;
	hwc->max_req_msg_size = max_req_msg_size;

	return 0;
out:
	/* mana_hwc_create_channel() will do the cleanup. */
	return err;
}
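
/* Create the HW channel: allocate the context and queues, establish the
 * channel with the SoC, and run a basic sanity test on it.
 */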
int mana_hwc_create_channel(struct gdma_context *gc)
{
	u32 max_req_msg_size, max_resp_msg_size;
	struct gdma_dev *gd = &gc->hwc;
	struct hw_channel_context *hwc;
	u16 q_depth_max;
	int err;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return -ENOMEM;

	gd->gdma_context = gc;
	gd->driver_data = hwc;
	hwc->gdma_dev = gd;
	hwc->dev = gc->dev;
	hwc->hwc_timeout = HW_CHANNEL_WAIT_RESOURCE_TIMEOUT_MS;

	/* HWC's instance number is always 0. */
	gd->dev_id.as_uint32 = 0;
	gd->dev_id.type = GDMA_DEVICE_HWC;

	gd->pdid = INVALID_PDID;
	gd->doorbell = INVALID_DOORBELL;

	/* mana_hwc_init_queues() only creates the required data structures,
	 * and doesn't touch the HWC device.
	 */
	err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				   HW_CHANNEL_MAX_REQUEST_SIZE,
				   HW_CHANNEL_MAX_RESPONSE_SIZE);
	if (err) {
		dev_err(hwc->dev, "Failed to initialize HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_establish_channel(gc, &q_depth_max, &max_req_msg_size,
					 &max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to establish HWC: %d\n", err);
		goto out;
	}

	err = mana_hwc_test_channel(gc->hwc.driver_data,
				    HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
				    max_req_msg_size, max_resp_msg_size);
	if (err) {
		dev_err(hwc->dev, "Failed to test HWC: %d\n", err);
		goto out;
	}

	return 0;
out:
	mana_hwc_destroy_channel(gc);
	return err;
}
void mana_hwc_destroy_channel(struct gdma_context *gc)
{
	struct hw_channel_context *hwc = gc->hwc.driver_data;

	if (!hwc)
		return;

	/* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
	 * non-zero, the HWC worked and we should tear down the HWC here.
	 */
	if (gc->max_num_cqs > 0) {
		mana_smc_teardown_hwc(&gc->shm_channel, false);
		gc->max_num_cqs = 0;
	}

	kfree(hwc->caller_ctx);
	hwc->caller_ctx = NULL;

	if (hwc->txq)
		mana_hwc_destroy_wq(hwc, hwc->txq);
	if (hwc->rxq)
		mana_hwc_destroy_wq(hwc, hwc->rxq);
	if (hwc->cq)
		mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);

	mana_gd_free_res_map(&hwc->inflight_msg_res);
	hwc->num_inflight_msg = 0;

	hwc->gdma_dev->doorbell = INVALID_DOORBELL;
	hwc->gdma_dev->pdid = INVALID_PDID;
	hwc->hwc_timeout = 0;

	kfree(hwc);
	gc->hwc.driver_data = NULL;
	gc->hwc.gdma_context = NULL;

	vfree(gc->cq_table);
	gc->cq_table = NULL;
}
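
/* Send a request over the HW channel and wait synchronously for the
 * response, which is copied into the caller-supplied buffer by
 * mana_hwc_handle_resp().
 */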
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
			  const void *req, u32 resp_len, void *resp)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *tx_wr;
	struct hwc_wq *txq = hwc->txq;
	struct gdma_req_hdr *req_msg;
	struct hwc_caller_ctx *ctx;
	u32 dest_vrcq = 0;
	u32 dest_vrq = 0;
	u16 msg_id;
	int err;

	mana_hwc_get_msg_index(hwc, &msg_id);

	tx_wr = &txq->msg_buf->reqs[msg_id];
	if (req_len > tx_wr->buf_len) {
		dev_err(hwc->dev, "HWC: req msg size: %d > %d\n", req_len,
			tx_wr->buf_len);
		err = -EINVAL;
		goto out;
	}

	ctx = hwc->caller_ctx + msg_id;
	ctx->output_buf = resp;
	ctx->output_buflen = resp_len;

	req_msg = (struct gdma_req_hdr *)tx_wr->buf_va;
	memcpy(req_msg, req, req_len);
	req_msg->req.hwc_msg_id = msg_id;
	tx_wr->msg_size = req_len;

	if (gc->is_pf) {
		dest_vrq = hwc->pf_dest_vrq_id;
		dest_vrcq = hwc->pf_dest_vrcq_id;
	}

	err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
	if (err) {
		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
		goto out;
	}

	if (!wait_for_completion_timeout(&ctx->comp_event,
					 msecs_to_jiffies(hwc->hwc_timeout))) {
		dev_err(hwc->dev, "HWC: Request timed out!\n");
		err = -ETIMEDOUT;
		goto out;
	}

	if (ctx->error) {
		err = ctx->error;
		goto out;
	}

	if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
		dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
			ctx->status_code);
		err = -EPROTO;
		goto out;
	}
out:
	mana_hwc_put_msg_index(hwc, msg_id);

	return err;
}