1 // SPDX-License-Identifier: GPL-2.0-only
3 /* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
4  * Copyright (C) 2017 Linaro Ltd.
 */
7 #include <linux/delay.h>
8 #include <linux/device.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/interrupt.h>
11 #include <linux/iopoll.h>
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
18 #include "hfi_venus.h"
19 #include "hfi_venus_io.h"
/* Masks for the queue-header 'type' word: tx type, rx type, priority, id. */
22 #define HFI_MASK_QHDR_TX_TYPE 0xff000000
23 #define HFI_MASK_QHDR_RX_TYPE 0x00ff0000
24 #define HFI_MASK_QHDR_PRI_TYPE 0x0000ff00
25 #define HFI_MASK_QHDR_ID_TYPE 0x000000ff
/* Queue identifiers: host->fw command queue, fw->host message/debug queues. */
27 #define HFI_HOST_TO_CTRL_CMD_Q 0
28 #define HFI_CTRL_TO_HOST_MSG_Q 1
29 #define HFI_CTRL_TO_HOST_DBG_Q 2
30 #define HFI_MASK_QHDR_STATUS 0x000000ff
/* Host-side indices into hdev->queues[] for the three interface queues. */
33 #define IFACEQ_CMD_IDX 0
34 #define IFACEQ_MSG_IDX 1
35 #define IFACEQ_DBG_IDX 2
36 #define IFACEQ_MAX_BUF_COUNT 50
37 #define IFACEQ_MAX_PARALLEL_CLNTS 16
/* Default value written into each queue header's 'type' word. */
38 #define IFACEQ_DFLT_QHDR 0x01010000
/* Poll period in microseconds for readx/readl_poll_timeout loops below. */
40 #define POLL_INTERVAL_US 50
/* Packet-size bounds (bytes) for packets travelling over the queues. */
42 #define IFACEQ_MAX_PKT_SIZE 1024
43 #define IFACEQ_MED_PKT_SIZE 768
44 #define IFACEQ_MIN_PKT_SIZE 8
45 #define IFACEQ_VAR_SMALL_PKT_SIZE 100
46 #define IFACEQ_VAR_LARGE_PKT_SIZE 512
47 #define IFACEQ_VAR_HUGE_PKT_SIZE (1024 * 12)
49 struct hfi_queue_table_header {
58 struct hfi_queue_header {
/* Size of the queue table header followed by one header per queue. */
75 #define IFACEQ_TABLE_SIZE \
76 (sizeof(struct hfi_queue_table_header) + \
77 sizeof(struct hfi_queue_header) * IFACEQ_NUM)
/* Backing storage size for a single interface queue. */
79 #define IFACEQ_QUEUE_SIZE (IFACEQ_MAX_PKT_SIZE * \
80 IFACEQ_MAX_BUF_COUNT * IFACEQ_MAX_PARALLEL_CLNTS)
/* Address of the i-th queue header, laid out right after the table header. */
82 #define IFACEQ_GET_QHDR_START_ADDR(ptr, i) \
83 (void *)(((ptr) + sizeof(struct hfi_queue_table_header)) + \
84 ((i) * sizeof(struct hfi_queue_header)))
86 #define QDSS_SIZE SZ_4K
87 #define SFR_SIZE SZ_4K
89 (IFACEQ_TABLE_SIZE + (IFACEQ_QUEUE_SIZE * IFACEQ_NUM))
/* 4K-aligned region sizes; SHARED_QSIZE is the total, rounded up to 1M. */
91 #define ALIGNED_QDSS_SIZE ALIGN(QDSS_SIZE, SZ_4K)
92 #define ALIGNED_SFR_SIZE ALIGN(SFR_SIZE, SZ_4K)
93 #define ALIGNED_QUEUE_SIZE ALIGN(QUEUE_SIZE, SZ_4K)
94 #define SHARED_QSIZE ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
95 ALIGNED_QDSS_SIZE, SZ_1M)
/* DMA memory descriptor: device address plus its kernel mapping. */
98 dma_addr_t da; /* device address */
99 void *kva; /* kernel virtual address */
/* Per-queue state: shared-memory header and the queue's backing storage. */
105 struct hfi_queue_header *qhdr;
106 struct mem_desc qmem;
/* DEINIT marks the device as torn down; see venus_is_valid_state(). */
110 VENUS_STATE_DEINIT = 1,
/* Per-device HFI state for one Venus core. */
114 struct venus_hfi_device {
115 struct venus_core *core;
/* Last command packet type written; checked during 1xx suspend. */
117 u32 last_packet_type;
120 enum venus_state state;
121 /* serialize read / write to the shared memory */
/* Completions signalled from the ISR thread on the matching fw messages. */
123 struct completion pwr_collapse_prep;
124 struct completion release_resource;
/* Shared-memory region holding the queue table and all queue storage. */
125 struct mem_desc ifaceq_table;
127 struct iface_queue queues[IFACEQ_NUM];
/* Scratch buffers for reading message and debug packets from the fw. */
128 u8 pkt_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
129 u8 dbg_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
/* Debug/tuning knobs, file-wide. */
132 static bool venus_pkt_debug;
/* Firmware debug-message mask: errors and fatals by default. */
133 int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
134 static bool venus_fw_low_power_mode = true;
/* Timeout in milliseconds when waiting for a hardware response. */
135 static int venus_hw_rsp_timeout = 1000;
136 static bool venus_fw_coverage;
/* Update the device state under hdev->lock. */
138 static void venus_set_state(struct venus_hfi_device *hdev,
139 enum venus_state state)
141 mutex_lock(&hdev->lock);
143 mutex_unlock(&hdev->lock);
/* True unless the device has been torn down (DEINIT). */
146 static bool venus_is_valid_state(struct venus_hfi_device *hdev)
148 return hdev->state != VENUS_STATE_DEINIT;
/*
 * Hex-dump an HFI packet when venus_pkt_debug is enabled.
 * The first u32 of every packet is its size.
 */
151 static void venus_dump_packet(struct venus_hfi_device *hdev, const void *packet)
153 size_t pkt_size = *(u32 *)packet;
155 if (!venus_pkt_debug)
158 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1, packet,
/*
 * Copy one packet into a host->firmware shared-memory ring queue.
 * Indices in the queue header are in 32-bit words (hence the <<2 / >>2
 * conversions). On success *rx_req reflects the queue header's rx_req
 * flag, telling the caller whether to raise a soft interrupt.
 */
162 static int venus_write_queue(struct venus_hfi_device *hdev,
163 struct iface_queue *queue,
164 void *packet, u32 *rx_req)
166 struct hfi_queue_header *qhdr;
167 u32 dwords, new_wr_idx;
168 u32 empty_space, rd_idx, wr_idx, qsize;
/* Queue not allocated/mapped yet. */
171 if (!queue->qmem.kva)
178 venus_dump_packet(hdev, packet);
/* Packet length in 32-bit words, taken from its size header. */
180 dwords = (*(u32 *)packet) >> 2;
184 rd_idx = qhdr->read_idx;
185 wr_idx = qhdr->write_idx;
186 qsize = qhdr->q_size;
187 /* ensure rd/wr indices's are read from memory */
/* Free space in the ring, accounting for wrap-around. */
190 if (wr_idx >= rd_idx)
191 empty_space = qsize - (wr_idx - rd_idx);
193 empty_space = rd_idx - wr_idx;
/* Queue full: packet does not fit. */
195 if (empty_space <= dwords) {
197 /* ensure tx_req is updated in memory */
203 /* ensure tx_req is updated in memory */
206 new_wr_idx = wr_idx + dwords;
207 wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
208 if (new_wr_idx < qsize) {
209 memcpy(wr_ptr, packet, dwords << 2);
/* Write wraps past the end of the ring: split the copy in two. */
214 len = (dwords - new_wr_idx) << 2;
215 memcpy(wr_ptr, packet, len);
216 memcpy(queue->qmem.kva, packet + len, new_wr_idx << 2);
219 /* make sure packet is written before updating the write index */
222 qhdr->write_idx = new_wr_idx;
223 *rx_req = qhdr->rx_req ? 1 : 0;
225 /* make sure write index is updated before an interrupt is raised */
/*
 * Read one packet from a firmware->host shared-memory ring queue into
 * pkt (caller provides at least IFACEQ_VAR_HUGE_PKT_SIZE bytes).
 * *tx_req is set from the queue header's tx_req flag so the caller
 * knows whether to signal the firmware afterwards.
 */
231 static int venus_read_queue(struct venus_hfi_device *hdev,
232 struct iface_queue *queue, void *pkt, u32 *tx_req)
234 struct hfi_queue_header *qhdr;
235 u32 dwords, new_rd_idx;
236 u32 rd_idx, wr_idx, type, qsize;
238 u32 recv_request = 0;
/* Queue not allocated/mapped yet. */
241 if (!queue->qmem.kva)
249 rd_idx = qhdr->read_idx;
250 wr_idx = qhdr->write_idx;
251 qsize = qhdr->q_size;
253 /* make sure data is valid before using it */
257 * Do not set receive request for debug queue, if set, Venus generates
258 * interrupt for debug messages even when there is no response message
259 * available. In general debug queue will not become full as it is being
260 * emptied out for every interrupt from Venus. Venus will anyway
261 * generates interrupt if it is full.
263 if (type & HFI_CTRL_TO_HOST_MSG_Q)
/* Empty queue: publish rx_req and bail out. */
266 if (rd_idx == wr_idx) {
267 qhdr->rx_req = recv_request;
269 /* update rx_req field in memory */
/* Packet length (in words) comes from the packet's own size header. */
274 rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
275 dwords = *rd_ptr >> 2;
279 new_rd_idx = rd_idx + dwords;
/* Sanity-check the length and index before copying out. */
280 if (((dwords << 2) <= IFACEQ_VAR_HUGE_PKT_SIZE) && rd_idx <= qsize) {
281 if (new_rd_idx < qsize) {
282 memcpy(pkt, rd_ptr, dwords << 2);
/* Read wraps past the end of the ring: split the copy in two. */
287 len = (dwords - new_rd_idx) << 2;
288 memcpy(pkt, rd_ptr, len);
289 memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
292 /* bad packet received, dropping */
293 new_rd_idx = qhdr->write_idx;
297 /* ensure the packet is read before updating read index */
300 qhdr->read_idx = new_rd_idx;
301 /* ensure updating read index */
/* Re-read the indices to decide whether more packets remain queued. */
304 rd_idx = qhdr->read_idx;
305 wr_idx = qhdr->write_idx;
306 /* ensure rd/wr indices are read from memory */
309 if (rd_idx != wr_idx)
312 qhdr->rx_req = recv_request;
314 *tx_req = qhdr->tx_req ? 1 : 0;
316 /* ensure rx_req is stored to memory and tx_req is loaded from memory */
319 venus_dump_packet(hdev, pkt);
/*
 * Allocate a 4K-aligned, write-combined DMA buffer and fill in desc
 * (kva/da/size/attrs).
 */
324 static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
327 struct device *dev = hdev->core->dev;
329 desc->attrs = DMA_ATTR_WRITE_COMBINE;
330 desc->size = ALIGN(size, SZ_4K);
332 desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
/* Release a buffer obtained from venus_alloc(). */
340 static void venus_free(struct venus_hfi_device *hdev, struct mem_desc *mem)
342 struct device *dev = hdev->core->dev;
344 dma_free_attrs(dev, mem->size, mem->kva, mem->da, mem->attrs);
/* Program the platform-specific register table from the core resources. */
347 static void venus_set_registers(struct venus_hfi_device *hdev)
349 const struct venus_resources *res = hdev->core->res;
350 const struct reg_val *tbl = res->reg_tbl;
351 unsigned int count = res->reg_tbl_size;
354 for (i = 0; i < count; i++)
355 writel(tbl[i].value, hdev->core->base + tbl[i].reg);
/*
 * Raise a host->ARM soft interrupt to tell the firmware that new work
 * is queued. The bit position differs on V6 hardware.
 */
358 static void venus_soft_int(struct venus_hfi_device *hdev)
360 void __iomem *cpu_ic_base = hdev->core->cpu_ic_base;
363 if (IS_V6(hdev->core))
364 clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT_V6);
366 clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT);
368 writel(clear_bit, cpu_ic_base + CPU_IC_SOFTINT);
/*
 * Write a command packet to the cmd queue; caller must hold hdev->lock.
 * When sync is set, also request an rx interrupt on the msg queue so a
 * synchronous response wakes the host. Signals the fw via soft interrupt.
 */
371 static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev,
372 void *pkt, bool sync)
374 struct device *dev = hdev->core->dev;
375 struct hfi_pkt_hdr *cmd_packet;
376 struct iface_queue *queue;
/* Refuse to queue commands once the device is DEINIT. */
380 if (!venus_is_valid_state(hdev))
/* Remember the last command type; used by venus_suspend_1xx(). */
383 cmd_packet = (struct hfi_pkt_hdr *)pkt;
384 hdev->last_packet_type = cmd_packet->pkt_type;
386 queue = &hdev->queues[IFACEQ_CMD_IDX];
388 ret = venus_write_queue(hdev, queue, pkt, &rx_req);
390 dev_err(dev, "write to iface cmd queue failed (%d)\n", ret);
396 * Inform video hardware to raise interrupt for synchronous
399 queue = &hdev->queues[IFACEQ_MSG_IDX];
400 queue->qhdr->rx_req = 1;
401 /* ensure rx_req is updated in memory */
406 venus_soft_int(hdev);
/* Locked wrapper around venus_iface_cmdq_write_nolock(). */
411 static int venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt, bool sync)
415 mutex_lock(&hdev->lock);
416 ret = venus_iface_cmdq_write_nolock(hdev, pkt, sync);
417 mutex_unlock(&hdev->lock);
/*
 * Send a SYS_SET_RESOURCE command describing a memory resource to the
 * firmware. A VIDC_RESOURCE_NONE id is treated as "nothing to do".
 */
422 static int venus_hfi_core_set_resource(struct venus_core *core, u32 id,
423 u32 size, u32 addr, void *cookie)
425 struct venus_hfi_device *hdev = to_hfi_priv(core);
426 struct hfi_sys_set_resource_pkt *pkt;
427 u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
430 if (id == VIDC_RESOURCE_NONE)
433 pkt = (struct hfi_sys_set_resource_pkt *)packet;
435 ret = pkt_sys_set_resource(pkt, id, size, addr, cookie);
439 ret = venus_iface_cmdq_write(hdev, pkt, false);
/*
 * Kick the firmware boot and poll (up to max_tries * ~0.5-1ms) for the
 * control status register to report completion or an error.
 */
446 static int venus_boot_core(struct venus_hfi_device *hdev)
448 struct device *dev = hdev->core->dev;
449 static const unsigned int max_tries = 100;
450 u32 ctrl_status = 0, mask_val = 0;
451 unsigned int count = 0;
452 void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
453 void __iomem *wrapper_base = hdev->core->wrapper_base;
/* IRIS2 variants: unmask only the A2H WD/CPU interrupt bits. */
456 if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
457 mask_val = readl(wrapper_base + WRAPPER_INTR_MASK);
458 mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BASK_V6 |
459 WRAPPER_INTR_MASK_A2HCPU_MASK);
461 mask_val = WRAPPER_INTR_MASK_A2HVCODEC_MASK;
464 writel(mask_val, wrapper_base + WRAPPER_INTR_MASK);
465 writel(1, cpu_cs_base + CPU_CS_SCIACMDARG3);
/* Start the boot sequence, then poll the status register. */
467 writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT);
468 while (!ctrl_status && count < max_tries) {
469 ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
/* Error status 4 means the UC region was misconfigured. */
470 if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) {
471 dev_err(dev, "invalid setting for UC_REGION\n");
476 usleep_range(500, 1000);
/* Timed out without the firmware signalling readiness. */
480 if (count >= max_tries)
483 if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
484 writel(0x1, cpu_cs_base + CPU_CS_H2XSOFTINTEN_V6);
485 writel(0x0, cpu_cs_base + CPU_CS_X2RPMH_V6);
/* Read and log the hardware version (major.minor.step) from the wrapper. */
491 static u32 venus_hwversion(struct venus_hfi_device *hdev)
493 struct device *dev = hdev->core->dev;
494 void __iomem *wrapper_base = hdev->core->wrapper_base;
496 u32 major, minor, step;
498 ver = readl(wrapper_base + WRAPPER_HW_VERSION);
499 major = ver & WRAPPER_HW_VERSION_MAJOR_VERSION_MASK;
500 major = major >> WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT;
501 minor = ver & WRAPPER_HW_VERSION_MINOR_VERSION_MASK;
502 minor = minor >> WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT;
503 step = ver & WRAPPER_HW_VERSION_STEP_VERSION_MASK;
505 dev_dbg(dev, VDBGL "venus hw version %x.%x.%x\n", major, minor, step);
/*
 * Bring the core to a running state: reprogram registers lost across a
 * power cycle, publish the shared-memory queue addresses, then boot.
 */
510 static int venus_run(struct venus_hfi_device *hdev)
512 struct device *dev = hdev->core->dev;
513 void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
517 * Re-program all of the registers that get reset as a result of
518 * regulator_disable() and _enable()
520 venus_set_registers(hdev);
/* Tell the firmware where the interface queues and SFR live. */
522 writel(hdev->ifaceq_table.da, cpu_cs_base + UC_REGION_ADDR);
523 writel(SHARED_QSIZE, cpu_cs_base + UC_REGION_SIZE);
524 writel(hdev->ifaceq_table.da, cpu_cs_base + CPU_CS_SCIACMDARG2);
525 writel(0x01, cpu_cs_base + CPU_CS_SCIACMDARG1);
527 writel(hdev->sfr.da, cpu_cs_base + SFR_ADDR);
529 ret = venus_boot_core(hdev);
531 dev_err(dev, "failed to reset venus core\n");
535 venus_hwversion(hdev);
/*
 * Halt the AXI bus interfaces before powering the core off. The exact
 * sequence depends on the hardware generation (IRIS2/V6, V4, older VBIF).
 */
540 static int venus_halt_axi(struct venus_hfi_device *hdev)
542 void __iomem *wrapper_base = hdev->core->wrapper_base;
543 void __iomem *vbif_base = hdev->core->vbif_base;
544 void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
545 void __iomem *aon_base = hdev->core->aon_base;
546 struct device *dev = hdev->core->dev;
/* IRIS2/V6 path: request LPI on the MVP NOC and debug bridge. */
551 if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
552 writel(0x3, cpu_cs_base + CPU_CS_X2RPMH_V6);
554 if (IS_IRIS2_1(hdev->core))
555 goto skip_aon_mvp_noc;
557 writel(0x1, aon_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
558 ret = readl_poll_timeout(aon_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
562 VBIF_AXI_HALT_ACK_TIMEOUT_US);
567 mask_val = (BIT(2) | BIT(1) | BIT(0));
568 writel(mask_val, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);
/* Release the debug bridge LPI request and wait for status to clear. */
570 writel(0x00, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);
571 ret = readl_poll_timeout(wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS_V6,
575 VBIF_AXI_HALT_ACK_TIMEOUT_US);
578 dev_err(dev, "DBLP Release: lpi_status %x\n", val);
/* V4 path: halt the CPU AXI port via the wrapper registers. */
584 if (IS_V4(hdev->core)) {
585 val = readl(wrapper_base + WRAPPER_CPU_AXI_HALT);
586 val |= WRAPPER_CPU_AXI_HALT_HALT;
587 writel(val, wrapper_base + WRAPPER_CPU_AXI_HALT);
589 ret = readl_poll_timeout(wrapper_base + WRAPPER_CPU_AXI_HALT_STATUS,
591 val & WRAPPER_CPU_AXI_HALT_STATUS_IDLE,
593 VBIF_AXI_HALT_ACK_TIMEOUT_US);
595 dev_err(dev, "AXI bus port halt timeout\n");
602 /* Halt AXI and AXI IMEM VBIF Access */
603 val = readl(vbif_base + VBIF_AXI_HALT_CTRL0);
604 val |= VBIF_AXI_HALT_CTRL0_HALT_REQ;
605 writel(val, vbif_base + VBIF_AXI_HALT_CTRL0);
607 /* Request for AXI bus port halt */
608 ret = readl_poll_timeout(vbif_base + VBIF_AXI_HALT_CTRL1, val,
609 val & VBIF_AXI_HALT_CTRL1_HALT_ACK,
611 VBIF_AXI_HALT_ACK_TIMEOUT_US);
613 dev_err(dev, "AXI bus port halt timeout\n");
/* Suspend hardware state and halt AXI; no-op if already powered off. */
620 static int venus_power_off(struct venus_hfi_device *hdev)
624 if (!hdev->power_enabled)
627 ret = venus_set_hw_state_suspend(hdev->core);
631 ret = venus_halt_axi(hdev);
635 hdev->power_enabled = false;
/*
 * Resume hardware state and re-run the core; on failure the hw state is
 * rolled back to suspended. No-op if already powered on.
 */
640 static int venus_power_on(struct venus_hfi_device *hdev)
644 if (hdev->power_enabled)
647 ret = venus_set_hw_state_resume(hdev->core);
651 ret = venus_run(hdev)
655 hdev->power_enabled = true;
/* Error path: undo the hw resume. */
660 venus_set_hw_state_suspend(hdev->core);
662 hdev->power_enabled = false;
/*
 * Read one packet from the fw->host message queue (lock held by caller)
 * and signal the firmware if it requested a tx interrupt.
 */
666 static int venus_iface_msgq_read_nolock(struct venus_hfi_device *hdev,
669 struct iface_queue *queue;
673 if (!venus_is_valid_state(hdev))
676 queue = &hdev->queues[IFACEQ_MSG_IDX];
678 ret = venus_read_queue(hdev, queue, pkt, &tx_req);
683 venus_soft_int(hdev);
/* Locked wrapper around venus_iface_msgq_read_nolock(). */
688 static int venus_iface_msgq_read(struct venus_hfi_device *hdev, void *pkt)
692 mutex_lock(&hdev->lock);
693 ret = venus_iface_msgq_read_nolock(hdev, pkt);
694 mutex_unlock(&hdev->lock);
/*
 * Read one packet from the fw->host debug queue (lock held by caller)
 * and signal the firmware if it requested a tx interrupt.
 */
699 static int venus_iface_dbgq_read_nolock(struct venus_hfi_device *hdev,
702 struct iface_queue *queue;
706 ret = venus_is_valid_state(hdev);
710 queue = &hdev->queues[IFACEQ_DBG_IDX];
712 ret = venus_read_queue(hdev, queue, pkt, &tx_req);
717 venus_soft_int(hdev);
/* Locked wrapper around venus_iface_dbgq_read_nolock(). */
722 static int venus_iface_dbgq_read(struct venus_hfi_device *hdev, void *pkt)
729 mutex_lock(&hdev->lock);
730 ret = venus_iface_dbgq_read_nolock(hdev, pkt);
731 mutex_unlock(&hdev->lock);
/* Initialize a queue header to its defaults (size is in 32-bit words). */
736 static void venus_set_qhdr_defaults(struct hfi_queue_header *qhdr)
739 qhdr->type = IFACEQ_DFLT_QHDR;
740 qhdr->q_size = IFACEQ_QUEUE_SIZE / 4;
746 qhdr->rx_irq_status = 0;
747 qhdr->tx_irq_status = 0;
/* Free the shared-memory regions and clear all queue bookkeeping. */
752 static void venus_interface_queues_release(struct venus_hfi_device *hdev)
754 mutex_lock(&hdev->lock);
756 venus_free(hdev, &hdev->ifaceq_table);
757 venus_free(hdev, &hdev->sfr);
759 memset(hdev->queues, 0, sizeof(hdev->queues));
760 memset(&hdev->ifaceq_table, 0, sizeof(hdev->ifaceq_table));
761 memset(&hdev->sfr, 0, sizeof(hdev->sfr));
763 mutex_unlock(&hdev->lock);
/*
 * Allocate the shared-memory region, carve out the three interface
 * queues after the queue table header, initialize all headers, and
 * allocate the SFR (subsystem failure reason) buffer.
 */
766 static int venus_interface_queues_init(struct venus_hfi_device *hdev)
768 struct hfi_queue_table_header *tbl_hdr;
769 struct iface_queue *queue;
771 struct mem_desc desc = {0};
776 ret = venus_alloc(hdev, &desc, ALIGNED_QUEUE_SIZE);
780 hdev->ifaceq_table = desc;
/* Queue storage begins right after the table of queue headers. */
781 offset = IFACEQ_TABLE_SIZE;
783 for (i = 0; i < IFACEQ_NUM; i++) {
784 queue = &hdev->queues[i];
785 queue->qmem.da = desc.da + offset;
786 queue->qmem.kva = desc.kva + offset;
787 queue->qmem.size = IFACEQ_QUEUE_SIZE;
788 offset += queue->qmem.size;
790 IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);
792 venus_set_qhdr_defaults(queue->qhdr);
794 queue->qhdr->start_addr = queue->qmem.da;
/* Tag each header with its queue type. */
796 if (i == IFACEQ_CMD_IDX)
797 queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
798 else if (i == IFACEQ_MSG_IDX)
799 queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
800 else if (i == IFACEQ_DBG_IDX)
801 queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
/* Fill in the table header describing the queue layout. */
804 tbl_hdr = hdev->ifaceq_table.kva;
805 tbl_hdr->version = 0;
806 tbl_hdr->size = IFACEQ_TABLE_SIZE;
807 tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
808 tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
809 tbl_hdr->num_q = IFACEQ_NUM;
810 tbl_hdr->num_active_q = IFACEQ_NUM;
813 * Set receive request to zero on debug queue as there is no
814 * need of interrupt from video hardware for debug messages
816 queue = &hdev->queues[IFACEQ_DBG_IDX];
817 queue->qhdr->rx_req = 0;
819 ret = venus_alloc(hdev, &desc, ALIGNED_SFR_SIZE);
825 sfr->buf_size = ALIGNED_SFR_SIZE;
828 /* ensure table and queue header structs are settled in memory */
/* Send a SYS property packet selecting the fw debug message mask. */
834 static int venus_sys_set_debug(struct venus_hfi_device *hdev, u32 debug)
836 struct hfi_sys_set_property_pkt *pkt;
837 u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
839 pkt = (struct hfi_sys_set_property_pkt *)packet;
841 pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug);
843 return venus_iface_cmdq_write(hdev, pkt, false);
/* Send a SYS property packet configuring fw coverage mode. */
846 static int venus_sys_set_coverage(struct venus_hfi_device *hdev, u32 mode)
848 struct hfi_sys_set_property_pkt *pkt;
849 u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
851 pkt = (struct hfi_sys_set_property_pkt *)packet;
853 pkt_sys_coverage_config(pkt, mode);
855 return venus_iface_cmdq_write(hdev, pkt, false);
/* Send a SYS property packet enabling/disabling idle indicator messages. */
858 static int venus_sys_set_idle_message(struct venus_hfi_device *hdev,
861 struct hfi_sys_set_property_pkt *pkt;
862 u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
867 pkt = (struct hfi_sys_set_property_pkt *)packet;
869 pkt_sys_idle_indicator(pkt, enable);
871 return venus_iface_cmdq_write(hdev, pkt, false);
/* Send a SYS property packet enabling/disabling fw power collapse. */
874 static int venus_sys_set_power_control(struct venus_hfi_device *hdev,
877 struct hfi_sys_set_property_pkt *pkt;
878 u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
880 pkt = (struct hfi_sys_set_property_pkt *)packet;
882 pkt_sys_power_control(pkt, enable);
884 return venus_iface_cmdq_write(hdev, pkt, false);
/* Send the platform UBWC configuration (from core resources) to the fw. */
887 static int venus_sys_set_ubwc_config(struct venus_hfi_device *hdev)
889 struct hfi_sys_set_property_pkt *pkt;
890 u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
891 const struct venus_resources *res = hdev->core->res;
894 pkt = (struct hfi_sys_set_property_pkt *)packet;
896 pkt_sys_ubwc_config(pkt, res->ubwc_conf);
898 ret = venus_iface_cmdq_write(hdev, pkt, false);
/*
 * Return the number of unread words in the given queue (distance between
 * read and write indices); used to check whether a queue is empty.
 */
905 static int venus_get_queue_size(struct venus_hfi_device *hdev,
908 struct hfi_queue_header *qhdr;
910 if (index >= IFACEQ_NUM)
913 qhdr = hdev->queues[index].qhdr;
917 return abs(qhdr->read_idx - qhdr->write_idx);
/*
 * Push the default SYS properties after core init: debug mask, idle
 * indicator (HFI V1 only), power control, and UBWC config when the
 * platform requires it. Failures are logged as warnings, not fatal.
 */
920 static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
922 struct device *dev = hdev->core->dev;
923 const struct venus_resources *res = hdev->core->res;
926 ret = venus_sys_set_debug(hdev, venus_fw_debug);
928 dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);
930 /* HFI_PROPERTY_SYS_IDLE_INDICATOR is not supported beyond 8916 (HFI V1) */
931 if (IS_V1(hdev->core)) {
932 ret = venus_sys_set_idle_message(hdev, false);
934 dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
937 ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode);
939 dev_warn(dev, "setting hw power collapse ON failed (%d)\n",
942 /* For specific venus core, it is mandatory to set the UBWC configuration */
943 if (res->ubwc_conf) {
944 ret = venus_sys_set_ubwc_config(hdev);
946 dev_warn(dev, "setting ubwc config failed (%d)\n", ret);
/* Build and send a simple per-session command packet of pkt_type. */
952 static int venus_session_cmd(struct venus_inst *inst, u32 pkt_type, bool sync)
954 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
955 struct hfi_session_pkt pkt;
957 pkt_session_cmd(&pkt, pkt_type, inst);
959 return venus_iface_cmdq_write(hdev, &pkt, sync);
/*
 * Drain the fw debug queue, logging every non-coverage packet as a
 * firmware debug message.
 */
962 static void venus_flush_debug_queue(struct venus_hfi_device *hdev)
964 struct device *dev = hdev->core->dev;
965 void *packet = hdev->dbg_buf;
967 while (!venus_iface_dbgq_read(hdev, packet)) {
968 struct hfi_msg_sys_coverage_pkt *pkt = packet;
970 if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) {
971 struct hfi_msg_sys_debug_pkt *pkt = packet;
973 dev_dbg(dev, VDBGFW "%s", pkt->msg_data);
/*
 * Send a SYS_PC_PREP command and (per the wait flag, presumably - the
 * wait path is partly outside this view) wait for the pwr_collapse_prep
 * completion, flushing the debug queue on timeout.
 */
978 static int venus_prepare_power_collapse(struct venus_hfi_device *hdev,
981 unsigned long timeout = msecs_to_jiffies(venus_hw_rsp_timeout);
982 struct hfi_sys_pc_prep_pkt pkt;
985 init_completion(&hdev->pwr_collapse_prep);
987 pkt_sys_pc_prep(&pkt);
989 ret = venus_iface_cmdq_write(hdev, &pkt, false);
996 ret = wait_for_completion_timeout(&hdev->pwr_collapse_prep, timeout);
998 venus_flush_debug_queue(hdev);
/* Check whether both the message and command queues are drained. */
1005 static int venus_are_queues_empty(struct venus_hfi_device *hdev)
1009 ret1 = venus_get_queue_size(hdev, IFACEQ_MSG_IDX);
1013 ret2 = venus_get_queue_size(hdev, IFACEQ_CMD_IDX);
/*
 * Print the firmware's SFR (subsystem failure reason) string, forcing
 * NUL-termination since a crashing firmware may not have terminated it.
 */
1023 static void venus_sfr_print(struct venus_hfi_device *hdev)
1025 struct device *dev = hdev->core->dev;
1026 struct hfi_sfr *sfr = hdev->sfr.kva;
1032 p = memchr(sfr->data, '\0', sfr->buf_size);
1034 * SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates
1035 * that Venus is in the process of crashing.
1038 sfr->data[sfr->buf_size - 1] = '\0';
1040 dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data);
/* On a SYS_ERROR event, mark the device DEINIT and dump the SFR. */
1043 static void venus_process_msg_sys_error(struct venus_hfi_device *hdev,
1046 struct hfi_msg_event_notify_pkt *event_pkt = packet;
1048 if (event_pkt->event_id != HFI_EVENT_SYS_ERROR)
1051 venus_set_state(hdev, VENUS_STATE_DEINIT);
1053 venus_sfr_print(hdev);
/*
 * Threaded ISR: drain the message queue, dispatching each packet to the
 * HFI message processor and handling the few messages that need
 * driver-side action, then flush the debug queue.
 */
1056 static irqreturn_t venus_isr_thread(struct venus_core *core)
1058 struct venus_hfi_device *hdev = to_hfi_priv(core);
1059 const struct venus_resources *res;
1066 res = hdev->core->res;
1067 pkt = hdev->pkt_buf;
1070 while (!venus_iface_msgq_read(hdev, pkt)) {
1071 msg_ret = hfi_process_msg_packet(core, pkt);
1073 case HFI_MSG_EVENT_NOTIFY:
1074 venus_process_msg_sys_error(hdev, pkt);
/* After SYS_INIT, hand the platform vmem resource to the fw. */
1076 case HFI_MSG_SYS_INIT:
1077 venus_hfi_core_set_resource(core, res->vmem_id,
1082 case HFI_MSG_SYS_RELEASE_RESOURCE:
1083 complete(&hdev->release_resource);
1085 case HFI_MSG_SYS_PC_PREP:
1086 complete(&hdev->pwr_collapse_prep);
1093 venus_flush_debug_queue(hdev);
/*
 * Hard IRQ handler: latch the wrapper interrupt status (mask bits differ
 * between the V6 and pre-V6 variants - both branches are visible here,
 * presumably selected by a version check lost from this view), ack the
 * interrupt, and defer to the threaded handler.
 */
1098 static irqreturn_t venus_isr(struct venus_core *core)
1100 struct venus_hfi_device *hdev = to_hfi_priv(core);
1102 void __iomem *cpu_cs_base;
1103 void __iomem *wrapper_base;
1108 cpu_cs_base = hdev->core->cpu_cs_base;
1109 wrapper_base = hdev->core->wrapper_base;
1111 status = readl(wrapper_base + WRAPPER_INTR_STATUS);
/* V6 mask variant. */
1113 if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
1114 status & WRAPPER_INTR_STATUS_A2HWD_MASK_V6 ||
1115 status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
1116 hdev->irq_status = status;
/* Pre-V6 mask variant. */
1118 if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
1119 status & WRAPPER_INTR_STATUS_A2HWD_MASK ||
1120 status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
1121 hdev->irq_status = status;
/* Ack: clear the A2H soft interrupt and the wrapper status bits. */
1123 writel(1, cpu_cs_base + CPU_CS_A2HSOFTINTCLR);
1125 writel(status, wrapper_base + WRAPPER_INTR_CLEAR);
1127 return IRQ_WAKE_THREAD;
/*
 * Initialize the core: move state to INIT, send SYS_INIT, request the
 * firmware image version (best effort), then push default properties.
 */
1130 static int venus_core_init(struct venus_core *core)
1132 struct venus_hfi_device *hdev = to_hfi_priv(core);
1133 struct device *dev = core->dev;
1134 struct hfi_sys_get_property_pkt version_pkt;
1135 struct hfi_sys_init_pkt pkt;
1138 pkt_sys_init(&pkt, HFI_VIDEO_ARCH_OX);
1140 venus_set_state(hdev, VENUS_STATE_INIT);
1142 ret = venus_iface_cmdq_write(hdev, &pkt, false);
1146 pkt_sys_image_version(&version_pkt);
1148 ret = venus_iface_cmdq_write(hdev, &version_pkt, false);
1150 dev_warn(dev, "failed to send image version pkt to fw\n");
1152 ret = venus_sys_set_default_properties(hdev);
/* Deinit: mark DEINIT and record the core as suspended/powered off. */
1159 static int venus_core_deinit(struct venus_core *core)
1161 struct venus_hfi_device *hdev = to_hfi_priv(core);
1163 venus_set_state(hdev, VENUS_STATE_DEINIT);
1164 hdev->suspended = true;
1165 hdev->power_enabled = false;
/* Send a SYS ping command carrying the given cookie. */
1170 static int venus_core_ping(struct venus_core *core, u32 cookie)
1172 struct venus_hfi_device *hdev = to_hfi_priv(core);
1173 struct hfi_sys_ping_pkt pkt;
1175 pkt_sys_ping(&pkt, cookie);
1177 return venus_iface_cmdq_write(hdev, &pkt, false);
/* Ask the firmware to trigger a subsystem restart of the given type. */
1180 static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type)
1182 struct venus_hfi_device *hdev = to_hfi_priv(core);
1183 struct hfi_sys_test_ssr_pkt pkt;
1186 ret = pkt_sys_ssr_cmd(&pkt, trigger_type);
1190 return venus_iface_cmdq_write(hdev, &pkt, false);
/*
 * Create a firmware session of the given type/codec; re-applies the fw
 * debug mask first and flushes the debug queue on failure.
 */
1193 static int venus_session_init(struct venus_inst *inst, u32 session_type,
1196 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1197 struct hfi_session_init_pkt pkt;
1200 ret = venus_sys_set_debug(hdev, venus_fw_debug);
1204 ret = pkt_session_init(&pkt, inst, session_type, codec);
1208 ret = venus_iface_cmdq_write(hdev, &pkt, true);
1215 venus_flush_debug_queue(hdev);
/*
 * End the session; if coverage collection is enabled, ask the fw to
 * emit coverage data first (failure is only a warning).
 */
1219 static int venus_session_end(struct venus_inst *inst)
1221 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1222 struct device *dev = hdev->core->dev;
1224 if (venus_fw_coverage) {
1225 if (venus_sys_set_coverage(hdev, venus_fw_coverage))
1226 dev_warn(dev, "fw coverage msg ON failed\n");
1229 return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_END, true);
/* Abort the session, draining the debug queue beforehand. */
1232 static int venus_session_abort(struct venus_inst *inst)
1234 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1236 venus_flush_debug_queue(hdev);
1238 return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_ABORT, true);
/* Flush session buffers of the requested kind (synchronous). */
1241 static int venus_session_flush(struct venus_inst *inst, u32 flush_mode)
1243 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1244 struct hfi_session_flush_pkt pkt;
1247 ret = pkt_session_flush(&pkt, inst, flush_mode);
1251 return venus_iface_cmdq_write(hdev, &pkt, true);
/* Simple synchronous session start command. */
1254 static int venus_session_start(struct venus_inst *inst)
1256 return venus_session_cmd(inst, HFI_CMD_SESSION_START, true);
/* Simple synchronous session stop command. */
1259 static int venus_session_stop(struct venus_inst *inst)
1261 return venus_session_cmd(inst, HFI_CMD_SESSION_STOP, true);
/* Asynchronous session continue command. */
1264 static int venus_session_continue(struct venus_inst *inst)
1266 return venus_session_cmd(inst, HFI_CMD_SESSION_CONTINUE, false);
/*
 * Queue an "empty this buffer" (input buffer) command; decoder sessions
 * use the compressed-buffer packet, encoder sessions the uncompressed one.
 */
1269 static int venus_session_etb(struct venus_inst *inst,
1270 struct hfi_frame_data *in_frame)
1272 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1273 u32 session_type = inst->session_type;
1276 if (session_type == VIDC_SESSION_TYPE_DEC) {
1277 struct hfi_session_empty_buffer_compressed_pkt pkt;
1279 ret = pkt_session_etb_decoder(&pkt, inst, in_frame);
1283 ret = venus_iface_cmdq_write(hdev, &pkt, false);
1284 } else if (session_type == VIDC_SESSION_TYPE_ENC) {
1285 struct hfi_session_empty_buffer_uncompressed_plane0_pkt pkt;
1287 ret = pkt_session_etb_encoder(&pkt, inst, in_frame);
1291 ret = venus_iface_cmdq_write(hdev, &pkt, false);
/* Queue a "fill this buffer" (output buffer) command. */
1299 static int venus_session_ftb(struct venus_inst *inst,
1300 struct hfi_frame_data *out_frame)
1302 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1303 struct hfi_session_fill_buffer_pkt pkt;
1306 ret = pkt_session_ftb(&pkt, inst, out_frame);
1310 return venus_iface_cmdq_write(hdev, &pkt, false);
/* Register non-input buffers with the firmware (input is skipped). */
1313 static int venus_session_set_buffers(struct venus_inst *inst,
1314 struct hfi_buffer_desc *bd)
1316 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1317 struct hfi_session_set_buffers_pkt *pkt;
1318 u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1321 if (bd->buffer_type == HFI_BUFFER_INPUT)
1324 pkt = (struct hfi_session_set_buffers_pkt *)packet;
1326 ret = pkt_session_set_buffers(pkt, inst, bd);
1330 return venus_iface_cmdq_write(hdev, pkt, false);
/* Release non-input buffers from the firmware (synchronous). */
1333 static int venus_session_unset_buffers(struct venus_inst *inst,
1334 struct hfi_buffer_desc *bd)
1336 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1337 struct hfi_session_release_buffer_pkt *pkt;
1338 u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1341 if (bd->buffer_type == HFI_BUFFER_INPUT)
1344 pkt = (struct hfi_session_release_buffer_pkt *)packet;
1346 ret = pkt_session_unset_buffers(pkt, inst, bd);
1350 return venus_iface_cmdq_write(hdev, pkt, true);
/* Synchronous load-resources session command. */
1353 static int venus_session_load_res(struct venus_inst *inst)
1355 return venus_session_cmd(inst, HFI_CMD_SESSION_LOAD_RESOURCES, true);
/* Synchronous release-resources session command. */
1358 static int venus_session_release_res(struct venus_inst *inst)
1360 return venus_session_cmd(inst, HFI_CMD_SESSION_RELEASE_RESOURCES, true);
/* Ask the firmware to parse a sequence header buffer. */
1363 static int venus_session_parse_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
1366 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1367 struct hfi_session_parse_sequence_header_pkt *pkt;
1368 u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
1371 pkt = (struct hfi_session_parse_sequence_header_pkt *)packet;
1373 ret = pkt_session_parse_seq_header(pkt, inst, seq_hdr, seq_hdr_len);
1377 ret = venus_iface_cmdq_write(hdev, pkt, false);
/* Ask the firmware to generate a sequence header into the given buffer. */
1384 static int venus_session_get_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
1387 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1388 struct hfi_session_get_sequence_header_pkt *pkt;
1389 u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
1392 pkt = (struct hfi_session_get_sequence_header_pkt *)packet;
1394 ret = pkt_session_get_seq_hdr(pkt, inst, seq_hdr, seq_hdr_len);
1398 return venus_iface_cmdq_write(hdev, pkt, false);
/* Set a session property; unsupported properties are handled specially. */
1401 static int venus_session_set_property(struct venus_inst *inst, u32 ptype,
1404 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1405 struct hfi_session_set_property_pkt *pkt;
1406 u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
1409 pkt = (struct hfi_session_set_property_pkt *)packet;
1411 ret = pkt_session_set_property(pkt, inst, ptype, pdata);
1412 if (ret == -ENOTSUPP)
1417 return venus_iface_cmdq_write(hdev, pkt, false);
/* Query a session property from the firmware (synchronous). */
1420 static int venus_session_get_property(struct venus_inst *inst, u32 ptype)
1422 struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
1423 struct hfi_session_get_property_pkt pkt;
1426 ret = pkt_session_get_property(&pkt, inst, ptype);
1430 return venus_iface_cmdq_write(hdev, &pkt, true);
/* Power the core back on if suspended; serialized by hdev->lock. */
1433 static int venus_resume(struct venus_core *core)
1435 struct venus_hfi_device *hdev = to_hfi_priv(core);
1438 mutex_lock(&hdev->lock);
1440 if (!hdev->suspended)
1443 ret = venus_power_on(hdev);
1447 hdev->suspended = false;
1449 mutex_unlock(&hdev->lock);
/*
 * Suspend sequence for Venus 1xx: prepare for power collapse, verify the
 * PC_PREP command was the last one sent, that both queues are drained
 * and that the fw reports PC_READY, then power off.
 */
1454 static int venus_suspend_1xx(struct venus_core *core)
1456 struct venus_hfi_device *hdev = to_hfi_priv(core);
1457 struct device *dev = core->dev;
1458 void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1462 if (!hdev->power_enabled || hdev->suspended)
1465 mutex_lock(&hdev->lock);
1466 ret = venus_is_valid_state(hdev);
1467 mutex_unlock(&hdev->lock);
1470 dev_err(dev, "bad state, cannot suspend\n");
1474 ret = venus_prepare_power_collapse(hdev, true);
1476 dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
1480 mutex_lock(&hdev->lock);
/* Another command raced in after PC_PREP: abort the suspend. */
1482 if (hdev->last_packet_type != HFI_CMD_SYS_PC_PREP) {
1483 mutex_unlock(&hdev->lock);
1487 ret = venus_are_queues_empty(hdev);
1488 if (ret < 0 || !ret) {
1489 mutex_unlock(&hdev->lock);
/* Firmware must report PC_READY before power is removed. */
1493 ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1494 if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
1495 mutex_unlock(&hdev->lock);
1499 ret = venus_power_off(hdev);
1501 mutex_unlock(&hdev->lock);
1505 hdev->suspended = true;
1507 mutex_unlock(&hdev->lock);
/*
 * True when the ARM core is in WFI and the video core reports idle.
 * V6 reads CPU status from the TZ wrapper, older parts from the wrapper.
 */
1512 static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
1514 void __iomem *wrapper_base = hdev->core->wrapper_base;
1515 void __iomem *wrapper_tz_base = hdev->core->wrapper_tz_base;
1516 void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1517 u32 ctrl_status, cpu_status;
1519 if (IS_V6(hdev->core))
1520 cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
1522 cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
1523 ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1525 if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
1526 ctrl_status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
/* True when the ARM core is in WFI and the fw signals PC_READY. */
1532 static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev)
1534 void __iomem *wrapper_base = hdev->core->wrapper_base;
1535 void __iomem *wrapper_tz_base = hdev->core->wrapper_tz_base;
1536 void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
1537 u32 ctrl_status, cpu_status;
1539 if (IS_V6(hdev->core))
1540 cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
1542 cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
1543 ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1545 if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
1546 ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
/*
 * Suspend path for Venus 3xx/4xx/6xx: wait for the firmware to go idle,
 * request power collapse, wait for PC_READY, then power off the core.
 * NOTE(review): this extract is missing braces/returns from the original
 * file; comments below describe only what the visible lines establish.
 */
1552 static int venus_suspend_3xx(struct venus_core *core)
1554 struct venus_hfi_device *hdev = to_hfi_priv(core);
1555 struct device *dev = core->dev;
1556 void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
/* Nothing to do if power is already off or we are already suspended */
1561 if (!hdev->power_enabled || hdev->suspended)
/* State check is done under the device lock */
1564 mutex_lock(&hdev->lock);
1565 ret = venus_is_valid_state(hdev);
1566 mutex_unlock(&hdev->lock);
1569 dev_err(dev, "bad state, cannot suspend\n");
/* If firmware already reports PC_READY, skip the idle/prepare steps */
1573 ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
1574 if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
1578 * Power collapse sequence for Venus 3xx and 4xx versions:
1579 * 1. Check for ARM9 and video core to be idle by checking WFI bit
1580 * (bit 0) in CPU status register and by checking Idle (bit 30) in
1581 * Control status register for video core.
1582 * 2. Send a command to prepare for power collapse.
1583 * 3. Check for WFI and PC_READY bits.
/* Step 1: poll until both ARM9 (WFI) and the video core are idle */
1585 ret = readx_poll_timeout(venus_cpu_and_video_core_idle, hdev, val, val,
1588 dev_err(dev, "wait for cpu and video core idle fail (%d)\n", ret);
/* Step 2: ask the firmware to prepare for power collapse */
1592 ret = venus_prepare_power_collapse(hdev, false);
1594 dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
/* Step 3: poll until the firmware signals WFI + PC_READY */
1598 ret = readx_poll_timeout(venus_cpu_idle_and_pc_ready, hdev, val, val,
/* Power-off and the suspended flag update are done under the lock */
1604 mutex_lock(&hdev->lock);
1606 ret = venus_power_off(hdev);
1608 dev_err(dev, "venus_power_off (%d)\n", ret);
1609 mutex_unlock(&hdev->lock);
1613 hdev->suspended = true;
1615 mutex_unlock(&hdev->lock);
1620 static int venus_suspend(struct venus_core *core)
1622 if (IS_V3(core) || IS_V4(core) || IS_V6(core))
1623 return venus_suspend_3xx(core);
1625 return venus_suspend_1xx(core);
/*
 * HFI backend operations exposed to the core driver: core lifecycle,
 * per-session control/data operations, power management and interrupt
 * handling.
 */
1628 static const struct hfi_ops venus_hfi_ops = {
/* Core-level lifecycle and diagnostics */
1629 .core_init = venus_core_init,
1630 .core_deinit = venus_core_deinit,
1631 .core_ping = venus_core_ping,
1632 .core_trigger_ssr = venus_core_trigger_ssr,
/* Per-session lifecycle and streaming control */
1634 .session_init = venus_session_init,
1635 .session_end = venus_session_end,
1636 .session_abort = venus_session_abort,
1637 .session_flush = venus_session_flush,
1638 .session_start = venus_session_start,
1639 .session_stop = venus_session_stop,
1640 .session_continue = venus_session_continue,
/* Buffer queueing (ETB = empty-this-buffer, FTB = fill-this-buffer) */
1641 .session_etb = venus_session_etb,
1642 .session_ftb = venus_session_ftb,
1643 .session_set_buffers = venus_session_set_buffers,
1644 .session_unset_buffers = venus_session_unset_buffers,
1645 .session_load_res = venus_session_load_res,
1646 .session_release_res = venus_session_release_res,
1647 .session_parse_seq_hdr = venus_session_parse_seq_hdr,
1648 .session_get_seq_hdr = venus_session_get_seq_hdr,
1649 .session_set_property = venus_session_set_property,
1650 .session_get_property = venus_session_get_property,
/* Power management hooks */
1652 .resume = venus_resume,
1653 .suspend = venus_suspend,
/* Threaded interrupt handler */
1656 .isr_thread = venus_isr_thread,
/*
 * Tear down the HFI device: release the shared interface queues and
 * destroy the device lock. NOTE(review): the original likely also frees
 * hdev and clears core back-pointers; those lines are not visible in
 * this extract — confirm against the full file.
 */
1659 void venus_hfi_destroy(struct venus_core *core)
1661 struct venus_hfi_device *hdev = to_hfi_priv(core);
1664 venus_interface_queues_release(hdev);
1665 mutex_destroy(&hdev->lock);
/*
 * Allocate and initialize the HFI device for @core: zero-allocated
 * state, lock init, ops installation and shared-queue setup. The device
 * starts in the suspended state until resume powers it up.
 * NOTE(review): error-path lines (allocation-failure return, cleanup on
 * queue-init failure) are not visible in this extract — confirm against
 * the full file.
 */
1670 int venus_hfi_create(struct venus_core *core)
1672 struct venus_hfi_device *hdev;
1675 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
1679 mutex_init(&hdev->lock);
/* Start suspended; venus_resume() transitions out of this state */
1682 hdev->suspended = true;
/* Expose this backend's operations to the core driver */
1684 core->ops = &venus_hfi_ops;
/* Allocate and set up the shared host/firmware interface queues */
1686 ret = venus_interface_queues_init(hdev);
/*
 * Re-initialize the shared interface queue headers, the queue table
 * header and the SFR buffer, e.g. after a firmware restart. The backing
 * memory itself is kept; only the header contents are rewritten.
 */
1699 void venus_hfi_queues_reinit(struct venus_core *core)
1701 struct venus_hfi_device *hdev = to_hfi_priv(core);
1702 struct hfi_queue_table_header *tbl_hdr;
1703 struct iface_queue *queue;
1704 struct hfi_sfr *sfr;
1707 mutex_lock(&hdev->lock);
/* Reset each queue header (cmd, msg, dbg) to its default state */
1709 for (i = 0; i < IFACEQ_NUM; i++) {
1710 queue = &hdev->queues[i];
/* Queue header lives at a fixed offset inside the table memory */
1712 IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);
1714 venus_set_qhdr_defaults(queue->qhdr);
/* Device address of the queue's ring buffer */
1716 queue->qhdr->start_addr = queue->qmem.da;
/* Tag the header with the queue's direction/type */
1718 if (i == IFACEQ_CMD_IDX)
1719 queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
1720 else if (i == IFACEQ_MSG_IDX)
1721 queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
1722 else if (i == IFACEQ_DBG_IDX)
1723 queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
/* Rebuild the queue table header describing all IFACEQ_NUM queues */
1726 tbl_hdr = hdev->ifaceq_table.kva;
1727 tbl_hdr->version = 0;
1728 tbl_hdr->size = IFACEQ_TABLE_SIZE;
1729 tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
1730 tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
1731 tbl_hdr->num_q = IFACEQ_NUM;
1732 tbl_hdr->num_active_q = IFACEQ_NUM;
1735 * Set receive request to zero on debug queue as there is no
1736 * need of interrupt from video hardware for debug messages
1738 queue = &hdev->queues[IFACEQ_DBG_IDX];
1739 queue->qhdr->rx_req = 0;
/* Re-initialize the subsystem failure reason (SFR) buffer header */
1741 sfr = hdev->sfr.kva;
1742 sfr->buf_size = ALIGNED_SFR_SIZE;
1744 /* ensure table and queue header structs are settled in memory */
1747 mutex_unlock(&hdev->lock);